github.com/mweagle/Sparta@v1.15.0/provision_build.go

// +build !lambdabinary

package sparta

import (
	"archive/zip"
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/aws/aws-sdk-go/service/lambda"
	humanize "github.com/dustin/go-humanize"
	spartaAWS "github.com/mweagle/Sparta/aws"
	spartaCF "github.com/mweagle/Sparta/aws/cloudformation"
	spartaS3 "github.com/mweagle/Sparta/aws/s3"
	"github.com/mweagle/Sparta/system"
	spartaZip "github.com/mweagle/Sparta/zip"
	gocc "github.com/mweagle/go-cloudcondenser"
	gocf "github.com/mweagle/go-cloudformation"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

////////////////////////////////////////////////////////////////////////////////
// CONSTANTS
////////////////////////////////////////////////////////////////////////////////
func spartaTagName(baseKey string) string {
	return fmt.Sprintf("io:gosparta:%s", baseKey)
}
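
// For example, spartaTagName("buildId") returns "io:gosparta:buildId".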

var (
	// SpartaTagBuildIDKey is the keyname used in the CloudFormation stack
	// tags that stores the user-supplied or automatically generated BuildID
	// for this run
	SpartaTagBuildIDKey = spartaTagName("buildId")

	// SpartaTagBuildTagsKey is the keyname used in the CloudFormation stack
	// tags that stores the optional user-supplied golang build tags
	SpartaTagBuildTagsKey = spartaTagName("buildTags")
)

// finalizerFunction is the type of function pushed onto the cleanup stack
type finalizerFunction func(logger *logrus.Logger)

////////////////////////////////////////////////////////////////////////////////
// Type that encapsulates an S3 URL with accessors to return either the
// full URL or just the valid S3 Keyname
type s3UploadURL struct {
	location string
	path     string
	version  string
}

func (s3URL *s3UploadURL) keyName() string {
	return s3URL.path
}

func newS3UploadURL(s3URL string) *s3UploadURL {
	urlParts, urlPartsErr := url.Parse(s3URL)
	if nil != urlPartsErr {
		return nil
	}
	queryParams, queryParamsErr := url.ParseQuery(urlParts.RawQuery)
	if nil != queryParamsErr {
		return nil
	}
	versionIDValues := queryParams["versionId"]
	version := ""
	if len(versionIDValues) == 1 {
		version = versionIDValues[0]
	}
	return &s3UploadURL{location: s3URL,
		path:    strings.TrimPrefix(urlParts.Path, "/"),
		version: version}
}
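
// Illustrative sketch (hypothetical values): for a path-style upload URL
// such as
//
//	https://s3.amazonaws.com/myBucket/MyService/MyService-code.zip?versionId=abc123
//
// the accessors report
//
//	keyName() == "myBucket/MyService/MyService-code.zip"
//	version   == "abc123"
//
// and version is empty when the URL carries no versionId query parameter.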

////////////////////////////////////////////////////////////////////////////////

func codeZipKey(url *s3UploadURL) string {
	if url == nil {
		return ""
	}
	return url.keyName()
}
func codeZipVersion(url *s3UploadURL) string {
	if url == nil {
		return ""
	}
	return url.version
}

////////////////////////////////////////////////////////////////////////////////
// Represents data associated with provisioning the S3 Site iff defined
type s3SiteContext struct {
	s3Site      *S3Site
	s3UploadURL *s3UploadURL
}

// Type of a workflow step.  Each step is responsible
// for returning the next step or an error if the overall
// workflow should stop.
type workflowStep func(ctx *workflowContext) (workflowStep, error)
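
// A minimal sketch (assumed, not the actual driver) of how a workflowStep
// chain is consumed: each step runs, then hands control to its successor
// until a step returns nil:
//
//	for step := initialStep; step != nil; {
//		next, err := step(ctx)
//		if err != nil {
//			ctx.rollback()
//			break
//		}
//		step = next
//	}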

// workflowStepDuration represents a discrete step in the provisioning
// workflow.
type workflowStepDuration struct {
	name     string
	duration time.Duration
}

// userdata is user-supplied, code-related values
type userdata struct {
	// Is this a -dry-run?
	noop bool
	// Is this a CGO enabled build?
	useCGO bool
	// Are in-place updates enabled?
	inPlace bool
	// The user-supplied or automatically generated BuildID
	buildID string
	// Optional user-supplied build tags
	buildTags string
	// Optional link flags
	linkFlags string
	// Canonical basename of the service.  Also used as the CloudFormation
	// stack name
	serviceName string
	// Service description
	serviceDescription string
	// The slice of Lambda functions that constitute the service
	lambdaAWSInfos []*LambdaAWSInfo
	// User supplied workflow hooks
	workflowHooks *WorkflowHooks
	// Code pipeline S3 trigger keyname
	codePipelineTrigger string
	// Optional APIGateway definition to associate with this service
	api APIGateway
	// Optional S3 site data to provision together with this service
	s3SiteContext *s3SiteContext
	// The user-supplied S3 bucket where service artifacts should be posted.
	s3Bucket string
}

// provisionContext is data that is mutated during the provisioning workflow
type provisionContext struct {
	// Information about the ZIP archive that contains the LambdaCode source
	s3CodeZipURL *s3UploadURL
	// AWS Session to be used for all API calls made in the process of provisioning
	// this service.
	awsSession *session.Session
	// Cached IAM role name map.  Used to support dynamic and static IAM role
	// names.  Static ARN role names are checked for existence via AWS APIs
	// prior to CloudFormation provisioning.
	lambdaIAMRoleNameMap map[string]*gocf.StringExpr
	// IO writer for autogenerated template results
	templateWriter io.Writer
	// CloudFormation Template
	cfTemplate *gocf.Template
	// Is versioning enabled for the S3 bucket?
	s3BucketVersioningEnabled bool
	// Name of the binary inside the ZIP archive
	binaryName string
	// Context to pass between workflow operations
	workflowHooksContext map[string]interface{}
}

// transaction, like provisionContext, scopes values that span the entire
// provisioning operation
type transaction struct {
	startTime time.Time
	// Optional rollback functions that workflow steps may append to if they
	// have made mutations during provisioning.
	rollbackFunctions []spartaS3.RollbackFunction
	// Optional finalizer functions that are unconditionally executed following
	// workflow completion, success or failure
	finalizerFunctions []finalizerFunction
	// Timings that measure how long things actually took
	stepDurations []*workflowStepDuration
}

////////////////////////////////////////////////////////////////////////////////
// Workflow context
// The workflow context is created by `provision` and provided to all
// functions that constitute the provisioning workflow.
type workflowContext struct {
	// User supplied data that's Lambda specific
	userdata userdata
	// Context that's mutated across the workflow steps
	context provisionContext
	// Transaction-scoped information that's mutated across the workflow
	// steps
	transaction transaction
	// Preconfigured logger
	logger *logrus.Logger
}

// recordDuration is a utility function to record how long a workflow
// step took to complete
func recordDuration(start time.Time, name string, ctx *workflowContext) {
	elapsed := time.Since(start)
	ctx.transaction.stepDurations = append(ctx.transaction.stepDurations,
		&workflowStepDuration{
			name:     name,
			duration: elapsed,
		})
}
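
// Callers invoke this as `defer recordDuration(time.Now(), "Step", ctx)`.
// Go evaluates deferred call arguments immediately, so time.Now() is
// captured at function entry and the recorded duration spans the entire
// enclosing function, not just the moment the defer fires.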

// Register a rollback function in the event that the provisioning
// function failed.
func (ctx *workflowContext) registerRollback(userFunction spartaS3.RollbackFunction) {
	if nil == ctx.transaction.rollbackFunctions || len(ctx.transaction.rollbackFunctions) <= 0 {
		ctx.transaction.rollbackFunctions = make([]spartaS3.RollbackFunction, 0)
	}
	ctx.transaction.rollbackFunctions = append(ctx.transaction.rollbackFunctions, userFunction)
}

// Register a finalizer function that is unconditionally executed after
// the workflow completes, whether it succeeded or failed.
func (ctx *workflowContext) registerFinalizer(userFunction finalizerFunction) {
	if nil == ctx.transaction.finalizerFunctions || len(ctx.transaction.finalizerFunctions) <= 0 {
		ctx.transaction.finalizerFunctions = make([]finalizerFunction, 0)
	}
	ctx.transaction.finalizerFunctions = append(ctx.transaction.finalizerFunctions, userFunction)
}

// Register a finalizer that cleans up local artifacts
func (ctx *workflowContext) registerFileCleanupFinalizer(localPath string) {
	cleanup := func(logger *logrus.Logger) {
		errRemove := os.Remove(localPath)
		if nil != errRemove {
			logger.WithFields(logrus.Fields{
				"Path":  localPath,
				"Error": errRemove,
			}).Warn("Failed to cleanup intermediate artifact")
		} else {
			logger.WithFields(logrus.Fields{
				"Path": relativePath(localPath),
			}).Debug("Build artifact deleted")
		}
	}
	ctx.registerFinalizer(cleanup)
}

// Run any provided rollback functions
func (ctx *workflowContext) rollback() {
	defer recordDuration(time.Now(), "Rollback", ctx)

	// Run each cleanup function concurrently.  If there's an error
	// all we're going to do is log it as a warning, since at this
	// point there's nothing to do...
	ctx.logger.Info("Invoking rollback functions")
	var wg sync.WaitGroup
	wg.Add(len(ctx.transaction.rollbackFunctions))
	rollbackErr := callRollbackHook(ctx, &wg)
	if rollbackErr != nil {
		ctx.logger.WithFields(logrus.Fields{
			"Error": rollbackErr,
		}).Warning("Rollback Hook failed to execute")
	}
	for _, eachCleanup := range ctx.transaction.rollbackFunctions {
		go func(cleanupFunc spartaS3.RollbackFunction, goLogger *logrus.Logger) {
			// Decrement the counter when the goroutine completes.
			defer wg.Done()
			// Run the cleanup function
			err := cleanupFunc(goLogger)
			if nil != err {
				ctx.logger.WithFields(logrus.Fields{
					"Error": err,
				}).Warning("Failed to cleanup resource")
			}
		}(eachCleanup, ctx.logger)
	}
	wg.Wait()
}

////////////////////////////////////////////////////////////////////////////////
// Private - START
//

// logFilesize outputs a friendly filesize for the given filepath
func logFilesize(message string, filePath string, logger *logrus.Logger) {
	// Binary size
	stat, err := os.Stat(filePath)
	if err == nil {
		logger.WithFields(logrus.Fields{
			"Size": humanize.Bytes(uint64(stat.Size())),
		}).Info(message)
	}
}

// Encapsulate calling the rollback hooks
func callRollbackHook(ctx *workflowContext, wg *sync.WaitGroup) error {
	if ctx.userdata.workflowHooks == nil {
		return nil
	}
	rollbackHooks := ctx.userdata.workflowHooks.Rollbacks
	if ctx.userdata.workflowHooks.Rollback != nil {
		ctx.logger.Warn("DEPRECATED: Single RollbackHook superseded by RollbackHookHandler slice")
		rollbackHooks = append(rollbackHooks,
			RollbackHookFunc(ctx.userdata.workflowHooks.Rollback))
	}
	for _, eachRollbackHook := range rollbackHooks {
		wg.Add(1)
		go func(handler RollbackHookHandler, context map[string]interface{},
			serviceName string,
			awsSession *session.Session,
			noop bool,
			logger *logrus.Logger) {
			// Decrement the counter when the goroutine completes.
			defer wg.Done()
			rollbackErr := handler.Rollback(context,
				serviceName,
				awsSession,
				noop,
				logger)
			// Only warn if the hook actually failed
			if rollbackErr != nil {
				logger.WithFields(logrus.Fields{
					"Error": rollbackErr,
				}).Warn("Rollback function failed to complete")
			}
		}(eachRollbackHook,
			ctx.context.workflowHooksContext,
			ctx.userdata.serviceName,
			ctx.context.awsSession,
			ctx.userdata.noop,
			ctx.logger)
	}
	return nil
}

// Encapsulate calling the service decorator hooks
func callServiceDecoratorHook(ctx *workflowContext) error {
	if ctx.userdata.workflowHooks == nil {
		return nil
	}
	serviceHooks := ctx.userdata.workflowHooks.ServiceDecorators
	if ctx.userdata.workflowHooks.ServiceDecorator != nil {
		ctx.logger.Warn("DEPRECATED: Single ServiceDecorator hook superseded by ServiceDecorators slice")
		serviceHooks = append(serviceHooks,
			ServiceDecoratorHookFunc(ctx.userdata.workflowHooks.ServiceDecorator))
	}
	// Each decorator is handed a fresh template to populate. The decorated
	// template is then safely merged into the service template, so a
	// decorator can't accidentally clobber resources defined elsewhere.
	for eachIndex, eachServiceHook := range serviceHooks {
		funcPtr := reflect.ValueOf(eachServiceHook).Pointer()
		funcForPC := runtime.FuncForPC(funcPtr)
		hookName := funcForPC.Name()
		if hookName == "" {
			hookName = fmt.Sprintf("ServiceHook[%d]", eachIndex)
		}
		ctx.logger.WithFields(logrus.Fields{
			"ServiceDecoratorHook": hookName,
			"WorkflowHookContext":  ctx.context.workflowHooksContext,
		}).Info("Calling WorkflowHook")

		serviceTemplate := gocf.NewTemplate()
		decoratorError := eachServiceHook.DecorateService(ctx.context.workflowHooksContext,
			ctx.userdata.serviceName,
			serviceTemplate,
			ctx.userdata.s3Bucket,
			codeZipKey(ctx.context.s3CodeZipURL),
			ctx.userdata.buildID,
			ctx.context.awsSession,
			ctx.userdata.noop,
			ctx.logger)
		if nil != decoratorError {
			return decoratorError
		}
		safeMergeErrs := gocc.SafeMerge(serviceTemplate, ctx.context.cfTemplate)
		if len(safeMergeErrs) != 0 {
			return errors.Errorf("Failed to merge templates: %#v", safeMergeErrs)
		}
	}
	return nil
}

// Encapsulate calling the archive hooks
func callArchiveHook(lambdaArchive *zip.Writer,
	ctx *workflowContext) error {

	if ctx.userdata.workflowHooks == nil {
		return nil
	}
	archiveHooks := ctx.userdata.workflowHooks.Archives
	if ctx.userdata.workflowHooks.Archive != nil {
		ctx.logger.Warn("DEPRECATED: Single ArchiveHook superseded by ArchiveHooks slice")
		archiveHooks = append(archiveHooks,
			ArchiveHookFunc(ctx.userdata.workflowHooks.Archive))
	}
	for _, eachArchiveHook := range archiveHooks {
		// Run the hook
		ctx.logger.WithFields(logrus.Fields{
			"WorkflowHookContext": ctx.context.workflowHooksContext,
		}).Info("Calling ArchiveHook")
		hookErr := eachArchiveHook.DecorateArchive(ctx.context.workflowHooksContext,
			ctx.userdata.serviceName,
			lambdaArchive,
			ctx.context.awsSession,
			ctx.userdata.noop,
			ctx.logger)
		if hookErr != nil {
			return errors.Wrapf(hookErr, "DecorateArchive returned an error")
		}
	}
	return nil
}

// Encapsulate calling a workflow hook
func callWorkflowHook(hookPhase string,
	hook WorkflowHook,
	hooks []WorkflowHookHandler,
	ctx *workflowContext) error {

	if hook != nil {
		ctx.logger.Warn(fmt.Sprintf("DEPRECATED: Single %s hook superseded by %ss slice",
			hookPhase,
			hookPhase))
		hooks = append(hooks, WorkflowHookFunc(hook))
	}
	for _, eachHook := range hooks {
		// Run the hook
		ctx.logger.WithFields(logrus.Fields{
			"Phase":               hookPhase,
			"WorkflowHookContext": ctx.context.workflowHooksContext,
		}).Info("Calling WorkflowHook")

		hookErr := eachHook.DecorateWorkflow(ctx.context.workflowHooksContext,
			ctx.userdata.serviceName,
			ctx.userdata.s3Bucket,
			ctx.userdata.buildID,
			ctx.context.awsSession,
			ctx.userdata.noop,
			ctx.logger)
		if hookErr != nil {
			return errors.Wrapf(hookErr, "DecorateWorkflow returned an error")
		}
	}
	return nil
}

// Encapsulate calling the validation hooks
func callValidationHooks(validationHooks []ServiceValidationHookHandler,
	template *gocf.Template,
	ctx *workflowContext) error {

	var marshaledTemplate []byte
	if len(validationHooks) != 0 {
		jsonBytes, jsonBytesErr := json.Marshal(template)
		if jsonBytesErr != nil {
			return errors.Wrapf(jsonBytesErr, "Failed to marshal template for validation")
		}
		marshaledTemplate = jsonBytes
	}

	for _, eachHook := range validationHooks {
		// Run the hook
		ctx.logger.WithFields(logrus.Fields{
			"Phase":                 "Validation",
			"ValidationHookContext": ctx.context.workflowHooksContext,
		}).Info("Calling WorkflowHook")

		var loopTemplate gocf.Template
		unmarshalErr := json.Unmarshal(marshaledTemplate, &loopTemplate)
		if unmarshalErr != nil {
			return errors.Wrapf(unmarshalErr,
				"Failed to unmarshal read-only copy of template for Validation")
		}

		hookErr := eachHook.ValidateService(ctx.context.workflowHooksContext,
			ctx.userdata.serviceName,
			&loopTemplate,
			ctx.userdata.s3Bucket,
			codeZipKey(ctx.context.s3CodeZipURL),
			ctx.userdata.buildID,
			ctx.context.awsSession,
			ctx.userdata.noop,
			ctx.logger)
		if hookErr != nil {
			return errors.Wrapf(hookErr, "Service failed to pass validation")
		}
	}
	return nil
}
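
// A minimal sketch of a conforming validation hook, assuming an adapter
// named ServiceValidationHookFunc analogous to the WorkflowHookFunc-style
// adapters used above (names are illustrative, signature inferred from the
// ValidateService call):
//
//	validator := ServiceValidationHookFunc(func(context map[string]interface{},
//		serviceName string,
//		template *gocf.Template,
//		s3Bucket string,
//		s3Key string,
//		buildID string,
//		awsSession *session.Session,
//		noop bool,
//		logger *logrus.Logger) error {
//		if len(template.Resources) == 0 {
//			return errors.New("template defines no resources")
//		}
//		return nil
//	})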

// versionAwareS3KeyName returns a keyname that provides the correct cache
// invalidation semantics based on whether the target bucket
// has versioning enabled
func versionAwareS3KeyName(s3DefaultKey string, s3VersioningEnabled bool, logger *logrus.Logger) (string, error) {
	versionKeyName := s3DefaultKey
	if !s3VersioningEnabled {
		var extension = path.Ext(s3DefaultKey)
		var prefixString = strings.TrimSuffix(s3DefaultKey, extension)

		hash := sha1.New()
		salt := fmt.Sprintf("%s-%d", s3DefaultKey, time.Now().UnixNano())
		_, writeErr := hash.Write([]byte(salt))
		if writeErr != nil {
			return "", errors.Wrapf(writeErr, "Failed to update hash digest")
		}
		versionKeyName = fmt.Sprintf("%s-%s%s",
			prefixString,
			hex.EncodeToString(hash.Sum(nil)),
			extension)

		logger.WithFields(logrus.Fields{
			"Default":      s3DefaultKey,
			"Extension":    extension,
			"PrefixString": prefixString,
			"Unique":       versionKeyName,
		}).Debug("Created unique S3 keyname")
	}
	return versionKeyName, nil
}
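
// Illustrative sketch (hypothetical digest): with versioning disabled,
//
//	versionAwareS3KeyName("MyService/MyService-code.zip", false, logger)
//
// yields a time-salted key along the lines of
//
//	MyService/MyService-code-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33.zip
//
// so repeated uploads never collide, while a versioning-enabled bucket keeps
// the stable default key and relies on S3 object versions instead.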

// Upload a local file to S3.  Returns the full S3 URL to the file that was
// uploaded. If the target bucket does not have versioning enabled,
// this function will automatically make a new key to ensure uniqueness
func uploadLocalFileToS3(localPath string, s3ObjectKey string, ctx *workflowContext) (string, error) {

	// If versioning is enabled, use a stable name, otherwise use a name
	// that's dynamically created. By default assume that the bucket is
	// enabled for versioning
	if s3ObjectKey == "" {
		defaultS3KeyName := fmt.Sprintf("%s/%s", ctx.userdata.serviceName, filepath.Base(localPath))
		s3KeyName, s3KeyNameErr := versionAwareS3KeyName(defaultS3KeyName,
			ctx.context.s3BucketVersioningEnabled,
			ctx.logger)
		if nil != s3KeyNameErr {
			return "", errors.Wrapf(s3KeyNameErr, "Failed to create version aware S3 keyname")
		}
		s3ObjectKey = s3KeyName
	}

	s3URL := ""
	if ctx.userdata.noop {

		// Binary size
		filesize := int64(0)
		stat, statErr := os.Stat(localPath)
		if statErr == nil {
			filesize = stat.Size()
		}
		ctx.logger.WithFields(logrus.Fields{
			"Bucket": ctx.userdata.s3Bucket,
			"Key":    s3ObjectKey,
			"File":   filepath.Base(localPath),
			"Size":   humanize.Bytes(uint64(filesize)),
		}).Info(noopMessage("S3 upload"))
		// Virtual-hosted-style URL for the object that would have been uploaded
		s3URL = fmt.Sprintf("https://%s.s3.amazonaws.com/%s",
			ctx.userdata.s3Bucket,
			s3ObjectKey)
	} else {
		// Make sure we mark things for cleanup in case there's a problem
		ctx.registerFileCleanupFinalizer(localPath)
		// Then upload it
		uploadLocation, uploadURLErr := spartaS3.UploadLocalFileToS3(localPath,
			ctx.context.awsSession,
			ctx.userdata.s3Bucket,
			s3ObjectKey,
			ctx.logger)
		if nil != uploadURLErr {
			return "", errors.Wrapf(uploadURLErr, "Failed to upload local file to S3")
		}
		s3URL = uploadLocation
		ctx.registerRollback(spartaS3.CreateS3RollbackFunc(ctx.context.awsSession, uploadLocation))
	}
	return s3URL, nil
}

// Private - END
////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
// Workflow steps
////////////////////////////////////////////////////////////////////////////////

func showOptionalAWSUsageInfo(err error, logger *logrus.Logger) {
	if err == nil {
		return
	}
	userAWSErr, userAWSErrOk := err.(awserr.Error)
	if userAWSErrOk {
		if strings.Contains(userAWSErr.Error(), "could not find region configuration") {
			logger.Error("")
			logger.Error("Consider setting env.AWS_REGION, env.AWS_DEFAULT_REGION, or env.AWS_SDK_LOAD_CONFIG to resolve this issue.")
			logger.Error("See the documentation at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html for more information.")
			logger.Error("")
		}
	}
}

// Verify & cache the IAM rolename to ARN mapping
func verifyIAMRoles(ctx *workflowContext) (workflowStep, error) {
	defer recordDuration(time.Now(), "Verifying IAM roles", ctx)

	// The map is either a literal Arn from a pre-existing role name
	// or a gocf.GetAtt() expression for an inline role definition.
	// Don't verify the latter, just create them...
	ctx.logger.Info("Verifying IAM Lambda execution roles")
	ctx.context.lambdaIAMRoleNameMap = make(map[string]*gocf.StringExpr)
	iamSvc := iam.New(ctx.context.awsSession)

	// Assemble all the RoleNames and validate the inline IAMRoleDefinitions
	var allRoleNames []string
	for _, eachLambdaInfo := range ctx.userdata.lambdaAWSInfos {
		if eachLambdaInfo.RoleName != "" {
			allRoleNames = append(allRoleNames, eachLambdaInfo.RoleName)
		}
		// Custom resources?
		for _, eachCustomResource := range eachLambdaInfo.customResources {
			if eachCustomResource.roleName != "" {
				allRoleNames = append(allRoleNames, eachCustomResource.roleName)
			}
		}
		// Profiling enabled?
		if profileDecorator != nil {
			profileErr := profileDecorator(ctx.userdata.serviceName,
				eachLambdaInfo,
				ctx.userdata.s3Bucket,
				ctx.logger)
			if profileErr != nil {
				return nil, errors.Wrapf(profileErr, "Failed to call lambda profile decorator")
			}
		}

		// Validate the IAMRoleDefinitions associated
		if nil != eachLambdaInfo.RoleDefinition {
			logicalName := eachLambdaInfo.RoleDefinition.logicalName(ctx.userdata.serviceName, eachLambdaInfo.lambdaFunctionName())
			_, exists := ctx.context.lambdaIAMRoleNameMap[logicalName]
			if !exists {
				// Insert it into the resource creation map and add
				// the "Ref" entry to the hashmap
				ctx.context.cfTemplate.AddResource(logicalName,
					eachLambdaInfo.RoleDefinition.toResource(eachLambdaInfo.EventSourceMappings,
						eachLambdaInfo.Options,
						ctx.logger))

				ctx.context.lambdaIAMRoleNameMap[logicalName] = gocf.GetAtt(logicalName, "Arn")
			}
		}

		// And the custom resource IAMRoles as well...
		for _, eachCustomResource := range eachLambdaInfo.customResources {
			if nil != eachCustomResource.roleDefinition {
				customResourceLogicalName := eachCustomResource.roleDefinition.logicalName(ctx.userdata.serviceName,
					eachCustomResource.userFunctionName)

				_, exists := ctx.context.lambdaIAMRoleNameMap[customResourceLogicalName]
				if !exists {
					ctx.context.cfTemplate.AddResource(customResourceLogicalName,
						eachCustomResource.roleDefinition.toResource(nil,
							eachCustomResource.options,
							ctx.logger))
					ctx.context.lambdaIAMRoleNameMap[customResourceLogicalName] = gocf.GetAtt(customResourceLogicalName, "Arn")
				}
			}
		}
	}

	// Then check all the RoleName literals
	for _, eachRoleName := range allRoleNames {
		_, exists := ctx.context.lambdaIAMRoleNameMap[eachRoleName]
		if !exists {
			// Check the role
			params := &iam.GetRoleInput{
				RoleName: aws.String(eachRoleName),
			}
			ctx.logger.Debug("Checking IAM RoleName: ", eachRoleName)
			resp, err := iamSvc.GetRole(params)
			if err != nil {
				return nil, err
			}
			// Cache it - we'll need it later when we create the
			// CloudFormation template which needs the execution Arn (not role)
			ctx.context.lambdaIAMRoleNameMap[eachRoleName] = gocf.String(*resp.Role.Arn)
		}
	}
	ctx.logger.WithFields(logrus.Fields{
		"Count": len(ctx.context.lambdaIAMRoleNameMap),
	}).Info("IAM roles verified")

	return verifyAWSPreconditions, nil
}
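
// The resulting map mixes two kinds of values (hypothetical entries):
//
//	"preExistingRoleName"     -> gocf.String("arn:aws:iam::123456789012:role/preExistingRoleName")
//	"MyServiceIAMRoleABC123"  -> gocf.GetAtt("MyServiceIAMRoleABC123", "Arn")
//
// Literal role names are resolved to ARNs up front via iam.GetRole, while
// inline IAMRoleDefinitions become GetAtt references that CloudFormation
// resolves at provisioning time.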

// Verify that everything is setup in AWS before we start building things
func verifyAWSPreconditions(ctx *workflowContext) (workflowStep, error) {
	defer recordDuration(time.Now(), "Verifying AWS preconditions", ctx)

	// If this is a NOOP, assume that versioning is not enabled
	if ctx.userdata.noop {
		ctx.logger.WithFields(logrus.Fields{
			"VersioningEnabled": false,
			"Bucket":            ctx.userdata.s3Bucket,
			"Region":            *ctx.context.awsSession.Config.Region,
		}).Info(noopMessage("S3 preconditions check"))
	} else if len(ctx.userdata.lambdaAWSInfos) != 0 {
		// We only need to check this if we're going to upload a ZIP, which
		// isn't always true in the case of a Step function...
		// Bucket versioning
		// Get the S3 bucket and see if it has versioning enabled
		isEnabled, versioningPolicyErr := spartaS3.BucketVersioningEnabled(ctx.context.awsSession,
			ctx.userdata.s3Bucket,
			ctx.logger)
		if nil != versioningPolicyErr {
			// If this is an error and suggests missing region, output some helpful error text
			return nil, versioningPolicyErr
		}
		ctx.logger.WithFields(logrus.Fields{
			"VersioningEnabled": isEnabled,
			"Bucket":            ctx.userdata.s3Bucket,
		}).Info("Checking S3 versioning")
		ctx.context.s3BucketVersioningEnabled = isEnabled
		if ctx.userdata.codePipelineTrigger != "" && !isEnabled {
			return nil, fmt.Errorf("s3 Bucket (%s) for CodePipeline trigger doesn't have a versioning policy enabled", ctx.userdata.s3Bucket)
		}
		// Bucket region should match the session region. Per the Lambda docs:
		/*
			The name of the Amazon S3 bucket where the .zip file that contains your deployment package is stored. This bucket must reside in the same AWS Region that you're creating the Lambda function in. You can specify a bucket from another AWS account as long as the Lambda function and the bucket are in the same region.
		*/
		bucketRegion, bucketRegionErr := spartaS3.BucketRegion(ctx.context.awsSession,
			ctx.userdata.s3Bucket,
			ctx.logger)

		if bucketRegionErr != nil {
			return nil, fmt.Errorf("failed to determine region for %s. Error: %s",
				ctx.userdata.s3Bucket,
				bucketRegionErr)
		}
		ctx.logger.WithFields(logrus.Fields{
			"Bucket": ctx.userdata.s3Bucket,
			"Region": bucketRegion,
		}).Info("Checking S3 region")
		if bucketRegion != *ctx.context.awsSession.Config.Region {
			return nil, fmt.Errorf("region (%s) does not match bucket region (%s)",
				*ctx.context.awsSession.Config.Region,
				bucketRegion)
		}
		// Nothing else to do...
		ctx.logger.WithFields(logrus.Fields{
			"Region": bucketRegion,
		}).Debug("Confirmed S3 region match")
	}

	// If there are codePipeline environments defined, warn if they don't include
	// the same keysets
	if nil != codePipelineEnvironments {
		mapKeys := func(inboundMap map[string]string) []string {
			keys := make([]string, len(inboundMap))
			i := 0
			for k := range inboundMap {
				keys[i] = k
				i++
			}
			// Sort so the DeepEqual comparison below compares key sets
			// rather than random map iteration orders
			sort.Strings(keys)
			return keys
		}
		aggregatedKeys := make([][]string, len(codePipelineEnvironments))
		i := 0
		for _, eachEnvMap := range codePipelineEnvironments {
			aggregatedKeys[i] = mapKeys(eachEnvMap)
			i++
		}
		i = 0
		keysEqual := true
		for _, eachKeySet := range aggregatedKeys {
			j := 0
			for _, eachKeySetTest := range aggregatedKeys {
				if j != i {
					if !reflect.DeepEqual(eachKeySet, eachKeySetTest) {
						keysEqual = false
					}
				}
				j++
			}
			i++
		}
		if !keysEqual {
			// Set up the fields so that the log message includes each
			// environment's key set
			fields := make(logrus.Fields, len(codePipelineEnvironments))
			for eachEnv, eachEnvMap := range codePipelineEnvironments {
				fields[eachEnv] = eachEnvMap
			}
			ctx.logger.WithFields(fields).Warn("CodePipeline environments do not define equivalent environment keys")
		}
	}

	return createPackageStep(), nil
}

// Build and package the application
func createPackageStep() workflowStep {
	return func(ctx *workflowContext) (workflowStep, error) {
		defer recordDuration(time.Now(), "Creating code bundle", ctx)

		// PreBuild Hook
		if ctx.userdata.workflowHooks != nil {
			preBuildErr := callWorkflowHook("PreBuild",
				ctx.userdata.workflowHooks.PreBuild,
				ctx.userdata.workflowHooks.PreBuilds,
				ctx)
			if nil != preBuildErr {
				return nil, preBuildErr
			}
		}
		sanitizedServiceName := sanitizedName(ctx.userdata.serviceName)
		buildErr := system.BuildGoBinary(ctx.userdata.serviceName,
			ctx.context.binaryName,
			ctx.userdata.useCGO,
			ctx.userdata.buildID,
			ctx.userdata.buildTags,
			ctx.userdata.linkFlags,
			ctx.userdata.noop,
			ctx.logger)
		if nil != buildErr {
			return nil, buildErr
		}
		// Cleanup the temporary binary
		defer func() {
			errRemove := os.Remove(ctx.context.binaryName)
			if nil != errRemove {
				ctx.logger.WithFields(logrus.Fields{
					"File":  ctx.context.binaryName,
					"Error": errRemove,
				}).Warn("Failed to delete binary")
			}
		}()

		// PostBuild Hook
		if ctx.userdata.workflowHooks != nil {
			postBuildErr := callWorkflowHook("PostBuild",
				ctx.userdata.workflowHooks.PostBuild,
				ctx.userdata.workflowHooks.PostBuilds,
				ctx)
			if nil != postBuildErr {
				return nil, postBuildErr
			}
		}
		tmpFile, err := system.TemporaryFile(ScratchDirectory,
			fmt.Sprintf("%s-code.zip", sanitizedServiceName))
		if err != nil {
			return nil, err
		}
		// Strip the local directory in case it's in there...
		ctx.logger.WithFields(logrus.Fields{
			"TempName": relativePath(tmpFile.Name()),
		}).Info("Creating code ZIP archive for upload")
		lambdaArchive := zip.NewWriter(tmpFile)

		// Archive Hook
		archiveErr := callArchiveHook(lambdaArchive, ctx)
		if nil != archiveErr {
			return nil, archiveErr
		}
		// Issue: https://github.com/mweagle/Sparta/issues/103. If the executable
		// bit isn't set, then AWS Lambda won't be able to fork the binary. This tends
		// to be set properly on a mac/linux os, but not on others. So pre-emptively
		// always set the bit.
		// Ref: https://github.com/mweagle/Sparta/issues/158
		fileHeaderAnnotator := func(header *zip.FileHeader) (*zip.FileHeader, error) {
			// Make the binary executable
			// Ref: https://github.com/aws/aws-lambda-go/blob/master/cmd/build-lambda-zip/main.go#L51
			header.CreatorVersion = 3 << 8
			header.ExternalAttrs = 0777 << 16
			return header, nil
		}
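
		// Why those header values work: a CreatorVersion whose high byte
		// is 3 marks the entry as Unix-made, and the ZIP format stores the
		// Unix file mode in the upper 16 bits of ExternalAttrs, so
		// 0777 << 16 yields rwxrwxrwx once the archive is unpacked.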

		// Add the binary to the archive using the annotated header
		readerErr := spartaZip.AnnotateAddToZip(lambdaArchive,
			ctx.context.binaryName,
			"",
			fileHeaderAnnotator,
			ctx.logger)
		if nil != readerErr {
			return nil, readerErr
		}
		archiveCloseErr := lambdaArchive.Close()
		if nil != archiveCloseErr {
			return nil, archiveCloseErr
		}
		tempfileCloseErr := tmpFile.Close()
		if nil != tempfileCloseErr {
			return nil, tempfileCloseErr
		}
		return createUploadStep(tmpFile.Name()), nil
	}
}

// Given the zipped binary in packagePath, upload the primary code bundle
// and optional S3 site resources iff they're defined.
func createUploadStep(packagePath string) workflowStep {
	return func(ctx *workflowContext) (workflowStep, error) {
		defer recordDuration(time.Now(), "Uploading code", ctx)

		var uploadTasks []*workTask
		if len(ctx.userdata.lambdaAWSInfos) != 0 {
			// We always upload the primary binary...
			uploadBinaryTask := func() workResult {
				logFilesize("Lambda code archive size", packagePath, ctx.logger)

				// Create the S3 key...
				zipS3URL, zipS3URLErr := uploadLocalFileToS3(packagePath, "", ctx)
				if nil != zipS3URLErr {
					return newTaskResult(nil, zipS3URLErr)
				}
				ctx.context.s3CodeZipURL = newS3UploadURL(zipS3URL)
				return newTaskResult(ctx.context.s3CodeZipURL, nil)
			}
			uploadTasks = append(uploadTasks, newWorkTask(uploadBinaryTask))
		} else {
			ctx.logger.Info("Bypassing S3 upload as no Lambda functions were provided")
		}

		// We might need to upload some other things...
		if nil != ctx.userdata.s3SiteContext.s3Site {
			uploadSiteTask := func() workResult {
				tempName := fmt.Sprintf("%s-S3Site.zip", ctx.userdata.serviceName)
				tmpFile, err := system.TemporaryFile(ScratchDirectory, tempName)
				if err != nil {
					return newTaskResult(nil,
						errors.Wrapf(err, "Failed to create temporary S3 site archive file"))
				}

				// Add the contents to the Zip file
				zipArchive := zip.NewWriter(tmpFile)
				absResourcePath, err := filepath.Abs(ctx.userdata.s3SiteContext.s3Site.resources)
				if nil != err {
					return newTaskResult(nil, errors.Wrapf(err, "Failed to get absolute filepath"))
				}
				// Ensure that the directory exists...
				_, existsErr := os.Stat(ctx.userdata.s3SiteContext.s3Site.resources)
				if existsErr != nil && os.IsNotExist(existsErr) {
					return newTaskResult(nil,
						errors.Wrapf(existsErr,
							"The S3 site resources directory (%s) does not exist",
							ctx.userdata.s3SiteContext.s3Site.resources))
				}

				ctx.logger.WithFields(logrus.Fields{
					"S3Key":      path.Base(tmpFile.Name()),
					"SourcePath": absResourcePath,
				}).Info("Creating S3Site archive")

				err = spartaZip.AddToZip(zipArchive, absResourcePath, absResourcePath, ctx.logger)
				if nil != err {
					return newTaskResult(nil, err)
				}
				errClose := zipArchive.Close()
				if errClose != nil {
					return newTaskResult(nil, errClose)
				}

				// Upload it & save the key
				s3SiteLambdaZipURL, s3SiteLambdaZipURLErr := uploadLocalFileToS3(tmpFile.Name(), "", ctx)
				if s3SiteLambdaZipURLErr != nil {
					return newTaskResult(nil,
						errors.Wrapf(s3SiteLambdaZipURLErr, "Failed to upload local file to S3"))
				}
				ctx.userdata.s3SiteContext.s3UploadURL = newS3UploadURL(s3SiteLambdaZipURL)
				return newTaskResult(ctx.userdata.s3SiteContext.s3UploadURL, nil)
			}
			uploadTasks = append(uploadTasks, newWorkTask(uploadSiteTask))
		}

		// Run it and figure out what happened
		p := newWorkerPool(uploadTasks, len(uploadTasks))
		_, uploadErrors := p.Run()

		if len(uploadErrors) > 0 {
			return nil, errors.Errorf("Encountered errors during upload: %#v", uploadErrors)
		}
		return validateSpartaPostconditions(), nil
	}
}

// maximumStackOperationTimeout returns the timeout
// value to use for a stack operation based on the type
// of resources that it provisions. In general the timeout
// is short with an exception made for CloudFront
// distributions
func maximumStackOperationTimeout(template *gocf.Template, logger *logrus.Logger) time.Duration {
	stackOperationTimeout := 20 * time.Minute
	// If there is a CloudFront distribution in there then
	// let's give that a bit more time to settle down...In general
	// the initial CloudFront distribution takes ~30 minutes
	for _, eachResource := range template.Resources {
		if eachResource.Properties.CfnResourceType() == "AWS::CloudFront::Distribution" {
			stackOperationTimeout = 60 * time.Minute
			break
		}
	}
	logger.WithField("OperationTimeout", stackOperationTimeout).Debug("Computed operation timeout value")
	return stackOperationTimeout
}

// createCodePipelineTriggerPackage handles marshaling the template and
// zipping it, together with per-environment parameter files, into the
// CodePipeline package, returning the path to the local archive
func createCodePipelineTriggerPackage(cfTemplateJSON []byte, ctx *workflowContext) (string, error) {
	tmpFile, err := system.TemporaryFile(ScratchDirectory, ctx.userdata.codePipelineTrigger)
	if err != nil {
		return "", errors.Wrapf(err, "Failed to create temporary file for CodePipeline")
	}

	templateArchive := zip.NewWriter(tmpFile)
	ctx.logger.WithFields(logrus.Fields{
		"Path": tmpFile.Name(),
	}).Info("Creating CodePipeline archive")

	// Create the ZIP entry for the CloudFormation template
	zipEntryName := "cloudformation.json"
	bytesWriter, bytesWriterErr := templateArchive.Create(zipEntryName)
	if bytesWriterErr != nil {
		return "", bytesWriterErr
	}

	bytesReader := bytes.NewReader(cfTemplateJSON)
	written, writtenErr := io.Copy(bytesWriter, bytesReader)
	if nil != writtenErr {
		return "", writtenErr
	}
	ctx.logger.WithFields(logrus.Fields{
		"WrittenBytes": written,
		"ZipName":      zipEntryName,
	}).Debug("Archiving file")

	// If there are codePipelineEnvironments defined, then we'll need to get all the
	// maps, marshal them to JSON, then add the JSON to the ZIP archive.
	if nil != codePipelineEnvironments {
		for eachEnvironment, eachMap := range codePipelineEnvironments {
			codePipelineParameters := map[string]interface{}{
				"Parameters": eachMap,
			}
			environmentJSON, environmentJSONErr := json.Marshal(codePipelineParameters)
			if nil != environmentJSONErr {
				ctx.logger.Error("Failed to Marshal CodePipeline environment: " + eachEnvironment)
				return "", environmentJSONErr
			}
			var envVarName = fmt.Sprintf("%s.json", eachEnvironment)

			// Create the ZIP entry for this environment's parameter file
			binaryWriter, binaryWriterErr := templateArchive.Create(envVarName)
			if binaryWriterErr != nil {
				return "", binaryWriterErr
			}
			_, writeErr := binaryWriter.Write(environmentJSON)
			if writeErr != nil {
				return "", writeErr
			}
		}
	}
	archiveCloseErr := templateArchive.Close()
	if nil != archiveCloseErr {
		return "", archiveCloseErr
	}
	tempfileCloseErr := tmpFile.Close()
	if nil != tempfileCloseErr {
		return "", tempfileCloseErr
	}
	ctx.logger.WithFields(logrus.Fields{
		"File": filepath.Base(tmpFile.Name()),
	}).Info("Created CodePipeline archive")
	return tmpFile.Name(), nil
}

// If the only detected changes to a stack are Lambda code updates,
// then use the Lambda API to update the code directly
// rather than waiting for CloudFormation
func applyInPlaceFunctionUpdates(ctx *workflowContext, templateURL string) (*cloudformation.Stack, error) {
	// Get the updates...
	awsCloudFormation := cloudformation.New(ctx.context.awsSession)
	changeSetRequestName := CloudFormationResourceName(fmt.Sprintf("%sInPlaceChangeSet", ctx.userdata.serviceName))
	changes, changesErr := spartaCF.CreateStackChangeSet(changeSetRequestName,
		ctx.userdata.serviceName,
		ctx.context.cfTemplate,
		templateURL,
		nil,
		awsCloudFormation,
		ctx.logger)
	if nil != changesErr {
		return nil, changesErr
	}
	if nil == changes || len(changes.Changes) <= 0 {
		return nil, fmt.Errorf("no changes detected")
	}
	updateCodeRequests := []*lambda.UpdateFunctionCodeInput{}
	invalidInPlaceRequests := []string{}
	for _, eachChange := range changes.Changes {
		resourceChange := eachChange.ResourceChange
		if *resourceChange.Action == "Modify" && *resourceChange.ResourceType == "AWS::Lambda::Function" {
			// Use the nil-safe accessors so that a missing code ZIP URL
			// can't trigger a nil pointer dereference
			updateCodeRequest := &lambda.UpdateFunctionCodeInput{
				FunctionName: resourceChange.PhysicalResourceId,
				S3Bucket:     aws.String(ctx.userdata.s3Bucket),
				S3Key:        aws.String(codeZipKey(ctx.context.s3CodeZipURL)),
			}
			if codeZipVersion(ctx.context.s3CodeZipURL) != "" {
				updateCodeRequest.S3ObjectVersion = aws.String(codeZipVersion(ctx.context.s3CodeZipURL))
			}
			updateCodeRequests = append(updateCodeRequests, updateCodeRequest)
		} else {
			invalidInPlaceRequests = append(invalidInPlaceRequests,
				fmt.Sprintf("%s for %s (ResourceType: %s)",
					*resourceChange.Action,
					*resourceChange.LogicalResourceId,
					*resourceChange.ResourceType))
		}
	}
	if len(invalidInPlaceRequests) != 0 {
		return nil, fmt.Errorf("unsupported in-place operations detected:\n\t%s", strings.Join(invalidInPlaceRequests, ",\n\t"))
	}

	ctx.logger.WithFields(logrus.Fields{
		"FunctionCount": len(updateCodeRequests),
	}).Info("Updating Lambda function code")
	ctx.logger.WithFields(logrus.Fields{
		"Updates": updateCodeRequests,
	}).Debug("Update requests")

	updateTaskMaker := func(lambdaSvc *lambda.Lambda, request *lambda.UpdateFunctionCodeInput) taskFunc {
		return func() workResult {
			_, updateResultErr := lambdaSvc.UpdateFunctionCode(request)
			return newTaskResult("", updateResultErr)
		}
	}
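
	// updateTaskMaker pins each task to its own request: prior to Go 1.22
	// the range variables in the loop below are reused across iterations,
	// so capturing them directly in a closure would make every task share
	// the final request. Passing them as arguments copies the values.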
	inPlaceUpdateTasks := make([]*workTask,
		len(updateCodeRequests))
	awsLambda := lambda.New(ctx.context.awsSession)
	for eachIndex, eachUpdateCodeRequest := range updateCodeRequests {
		updateTask := updateTaskMaker(awsLambda, eachUpdateCodeRequest)
		inPlaceUpdateTasks[eachIndex] = newWorkTask(updateTask)
	}

	// Add the request to delete the change set...
	// TODO: add some retry logic in here to handle failures.
	deleteChangeSetTask := func() workResult {
		_, deleteChangeSetResultErr := spartaCF.DeleteChangeSet(ctx.userdata.serviceName,
			changeSetRequestName,
			awsCloudFormation)
		return newTaskResult("", deleteChangeSetResultErr)
	}
	inPlaceUpdateTasks = append(inPlaceUpdateTasks, newWorkTask(deleteChangeSetTask))
	p := newWorkerPool(inPlaceUpdateTasks, len(inPlaceUpdateTasks))
	_, asyncErrors := p.Run()
	if len(asyncErrors) != 0 {
		return nil, fmt.Errorf("failed to update function code: %v", asyncErrors)
	}
	// Describe the stack so that we can satisfy the contract with the
	// normal path using CloudFormation
	describeStacksInput := &cloudformation.DescribeStacksInput{
		StackName: aws.String(ctx.userdata.serviceName),
	}
	describeStackOutput, describeStackOutputErr := awsCloudFormation.DescribeStacks(describeStacksInput)
	if nil != describeStackOutputErr {
		return nil, describeStackOutputErr
	}
	return describeStackOutput.Stacks[0], nil
}

// applyCloudFormationOperation is responsible for taking the current template
// and applying that operation to the stack. It's where the in-place
// branch is applied, because at this point all the template
// mutations have been accumulated
func applyCloudFormationOperation(ctx *workflowContext) (workflowStep, error) {
	stackTags := map[string]string{
		SpartaTagBuildIDKey: ctx.userdata.buildID,
	}
	if len(ctx.userdata.buildTags) != 0 {
		stackTags[SpartaTagBuildTagsKey] = ctx.userdata.buildTags
	}

	// Generate the CF template...
	cfTemplate, err := json.Marshal(ctx.context.cfTemplate)
	if err != nil {
		ctx.logger.Error("Failed to Marshal CloudFormation template: ", err.Error())
		return nil, err
	}

	// Consistent naming of template
	sanitizedServiceName := sanitizedName(ctx.userdata.serviceName)
	templateName := fmt.Sprintf("%s-cftemplate.json", sanitizedServiceName)
	templateFile, templateFileErr := system.TemporaryFile(ScratchDirectory, templateName)
	if nil != templateFileErr {
		return nil, templateFileErr
	}
	_, writeErr := templateFile.Write(cfTemplate)
	if nil != writeErr {
		return nil, writeErr
	}
	errClose := templateFile.Close()
	if errClose != nil {
		return nil, errClose
	}
	// Log the template if needed
	if nil != ctx.context.templateWriter || ctx.logger.Level <= logrus.DebugLevel {
		// Pretty-print the raw JSON bytes rather than re-marshaling them as
		// a Go string, which would produce a single escaped line
		var formatted bytes.Buffer
		formattedErr := json.Indent(&formatted, cfTemplate, "", " ")
		if nil != formattedErr {
			return nil, formattedErr
		}
		ctx.logger.WithFields(logrus.Fields{
			"Body": formatted.String(),
		}).Debug("CloudFormation template body")
		if nil != ctx.context.templateWriter {
			_, writeErr := io.WriteString(ctx.context.templateWriter,
				formatted.String())
			if writeErr != nil {
				return nil, errors.Wrapf(writeErr, "Failed to write template")
			}
		}
	}

	// If this isn't a CodePipeline-triggered deployment, provision the
	// stack directly
	if ctx.userdata.codePipelineTrigger == "" {
		if ctx.userdata.noop {
			ctx.logger.WithFields(logrus.Fields{
				"Bucket":       ctx.userdata.s3Bucket,
				"TemplateName": templateName,
			}).Info(noopMessage("Stack creation"))
		} else {
			// Upload the template we just wrote to disk...
			uploadURL, uploadURLErr := uploadLocalFileToS3(templateFile.Name(), "", ctx)
			if nil != uploadURLErr {
				return nil, uploadURLErr
			}

			// If we're supposed to update in-place, then go ahead and try that
			var stack *cloudformation.Stack
			var stackErr error
			if ctx.userdata.inPlace {
				stack, stackErr = applyInPlaceFunctionUpdates(ctx, uploadURL)
			} else {
				operationTimeout := maximumStackOperationTimeout(ctx.context.cfTemplate, ctx.logger)
				// Regular update, go ahead with the CloudFormation changes
				stack, stackErr = spartaCF.ConvergeStackState(ctx.userdata.serviceName,
					ctx.context.cfTemplate,
					uploadURL,
					stackTags,
					ctx.transaction.startTime,
					operationTimeout,
					ctx.context.awsSession,
					"▬",
					dividerLength,
					ctx.logger)
			}
			if nil != stackErr {
				return nil, stackErr
			}
			ctx.logger.WithFields(logrus.Fields{
				"StackName":    *stack.StackName,
				"StackId":      *stack.StackId,
				"CreationTime": *stack.CreationTime,
			}).Info("Stack provisioned")
		}
	} else {
		ctx.logger.Info("Creating pipeline package")

		ctx.registerFileCleanupFinalizer(templateFile.Name())
		_, urlErr := createCodePipelineTriggerPackage(cfTemplate, ctx)
		if nil != urlErr {
			return nil, urlErr
		}
	}
	return nil, nil
}

// verifyLambdaPreconditions is currently a no-op placeholder for
// per-function precondition checks
func verifyLambdaPreconditions(lambdaAWSInfo *LambdaAWSInfo, logger *logrus.Logger) error {
	return nil
}

func validateSpartaPostconditions() workflowStep {
	return func(ctx *workflowContext) (workflowStep, error) {
		validateErrs := make([]error, 0)

		requiredEnvVars := []string{envVarDiscoveryInformation,
			envVarLogLevel}

		// Verify that all Lambda functions have discovery information
		for eachResourceID, eachResourceDef := range ctx.context.cfTemplate.Resources {
			switch typedResource := eachResourceDef.Properties.(type) {
			case *gocf.LambdaFunction:
				if typedResource.Environment == nil {
					validateErrs = append(validateErrs,
						errors.Errorf("Lambda function %s does not include environment info", eachResourceID))
				} else {
					vars, varsOk := typedResource.Environment.Variables.(map[string]interface{})
					if !varsOk {
						validateErrs = append(validateErrs,
							errors.Errorf("Lambda function %s environment vars are unsupported type: %T",
								eachResourceID,
								typedResource.Environment.Variables))
					} else {
						for _, eachKey := range requiredEnvVars {
							_, exists := vars[eachKey]
							if !exists {
								validateErrs = append(validateErrs,
									errors.Errorf("Lambda function %s environment does not include key: %s",
										eachResourceID,
										eachKey))
							}
						}
					}
				}
			}
		}
		if len(validateErrs) != 0 {
			return nil, errors.Errorf("Problems validating template contents: %v", validateErrs)
		}
		return ensureCloudFormationStack(), nil
	}
}
  1334  
  1335  // ensureCloudFormationStack is responsible for
  1336  func ensureCloudFormationStack() workflowStep {
  1337  	return func(ctx *workflowContext) (workflowStep, error) {
  1338  		msg := "Ensuring CloudFormation stack"
  1339  		if ctx.userdata.inPlace {
  1340  			msg = "Updating Lambda function code "
  1341  		}
  1342  		defer recordDuration(time.Now(), msg, ctx)
  1343  
  1344  		// PreMarshall Hook
  1345  		if ctx.userdata.workflowHooks != nil {
  1346  			preMarshallErr := callWorkflowHook("PreMarshall",
  1347  				ctx.userdata.workflowHooks.PreMarshall,
  1348  				ctx.userdata.workflowHooks.PreMarshalls,
  1349  				ctx)
  1350  			if nil != preMarshallErr {
  1351  				return nil, preMarshallErr
  1352  			}
  1353  		}
  1354  
  1355  		// Add the "Parameters" to the template...
  1356  		if nil != codePipelineEnvironments {
  1357  			ctx.context.cfTemplate.Parameters = make(map[string]*gocf.Parameter)
  1358  			for _, eachEnvironment := range codePipelineEnvironments {
  1359  				for eachKey := range eachEnvironment {
  1360  					ctx.context.cfTemplate.Parameters[eachKey] = &gocf.Parameter{
  1361  						Type:    "String",
  1362  						Default: "",
  1363  					}
  1364  				}
  1365  			}
  1366  		}
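
		// The codePipelineEnvironments map consulted above is typically
		// populated before Provision runs, e.g. via (environment name and
		// variables are placeholder values):
		//
		//	registerErr := RegisterCodePipelineEnvironment("test",
		//		map[string]string{"MESSAGE": "Hello world"})
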
  1367  		for _, eachEntry := range ctx.userdata.lambdaAWSInfos {
  1368  			verifyErr := verifyLambdaPreconditions(eachEntry, ctx.logger)
  1369  			if verifyErr != nil {
  1370  				return nil, verifyErr
  1371  			}
  1372  			annotateCodePipelineEnvironments(eachEntry, ctx.logger)
  1373  
  1374  			err := eachEntry.export(ctx.userdata.serviceName,
  1375  				ctx.userdata.s3Bucket,
  1376  				codeZipKey(ctx.context.s3CodeZipURL),
  1377  				codeZipVersion(ctx.context.s3CodeZipURL),
  1378  				ctx.userdata.buildID,
  1379  				ctx.context.lambdaIAMRoleNameMap,
  1380  				ctx.context.cfTemplate,
  1381  				ctx.context.workflowHooksContext,
  1382  				ctx.logger)
  1383  			if nil != err {
  1384  				return nil, err
  1385  			}
  1386  		}
  1387  		// If there's an API gateway definition, include the resources that provision it. Since this export will likely
  1388  		// generate outputs that the s3 site needs, we'll use a temporary outputs accumulator, pass that to the S3Site
  1389  		// if it's defined, and then merge it with the normal output map.
  1390  		apiGatewayTemplate := gocf.NewTemplate()
  1391  
  1392  		if nil != ctx.userdata.api {
  1393  			err := ctx.userdata.api.Marshal(
  1394  				ctx.userdata.serviceName,
  1395  				ctx.context.awsSession,
  1396  				ctx.userdata.s3Bucket,
  1397  				codeZipKey(ctx.context.s3CodeZipURL),
  1398  				codeZipVersion(ctx.context.s3CodeZipURL),
  1399  				ctx.context.lambdaIAMRoleNameMap,
  1400  				apiGatewayTemplate,
  1401  				ctx.userdata.noop,
  1402  				ctx.logger)
  1403  			if nil == err {
  1404  				safeMergeErrs := gocc.SafeMerge(apiGatewayTemplate,
  1405  					ctx.context.cfTemplate)
  1406  				if len(safeMergeErrs) != 0 {
  1407  					err = errors.Errorf("APIGateway template merge failed: %v", safeMergeErrs)
  1408  				}
  1409  			}
  1410  			if nil != err {
  1411  				return nil, errors.Wrapf(err, "APIGateway template export failed")
  1412  			}
  1413  		}
  1414  
  1415  		// Service decorator?
  1416  		// This is run before the S3 Site in case the decorators
  1417  		// need to publish data to the MANIFEST for the site
  1418  		serviceDecoratorErr := callServiceDecoratorHook(ctx)
  1419  		if serviceDecoratorErr != nil {
  1420  			return nil, serviceDecoratorErr
  1421  		}
  1422  
  1423  		// Discovery info on a per-function basis
  1424  		for _, eachEntry := range ctx.userdata.lambdaAWSInfos {
  1425  			_, annotateErr := annotateDiscoveryInfo(eachEntry, ctx.context.cfTemplate, ctx.logger)
  1426  			if annotateErr != nil {
  1427  				return nil, annotateErr
  1428  			}
  1429  			_, annotateErr = annotateBuildInformation(eachEntry,
  1430  				ctx.context.cfTemplate,
  1431  				ctx.userdata.buildID,
  1432  				ctx.logger)
  1433  			if annotateErr != nil {
  1434  				return nil, annotateErr
  1435  			}
  1436  			// Any custom resources? These may also need discovery info
  1437  			// so that they can self-discover the stack name
  1438  			for _, eachCustomResource := range eachEntry.customResources {
  1439  				discoveryInfo, discoveryInfoErr := discoveryInfoForResource(eachCustomResource.logicalName(),
  1440  					nil)
  1441  				if discoveryInfoErr != nil {
  1442  					return nil, discoveryInfoErr
  1443  				}
  1444  				ctx.logger.WithFields(logrus.Fields{
  1445  					"Discovery": discoveryInfo,
  1446  					"Resource":  eachCustomResource.logicalName(),
  1447  				}).Info("Annotating discovery info for custom resource")
  1448  
  1449  				// Update the env map
  1450  				eachCustomResource.options.Environment[envVarDiscoveryInformation] = discoveryInfo
  1451  			}
  1452  		}
  1453  		// If there's a Site defined, include the resources that provision it
  1454  		if nil != ctx.userdata.s3SiteContext.s3Site {
  1455  			exportErr := ctx.userdata.s3SiteContext.s3Site.export(ctx.userdata.serviceName,
  1456  				ctx.context.binaryName,
  1457  				ctx.userdata.s3Bucket,
  1458  				codeZipKey(ctx.context.s3CodeZipURL),
  1459  				ctx.userdata.s3SiteContext.s3UploadURL.keyName(),
  1460  				apiGatewayTemplate.Outputs,
  1461  				ctx.context.lambdaIAMRoleNameMap,
  1462  				ctx.context.cfTemplate,
  1463  				ctx.logger)
  1464  			if exportErr != nil {
  1465  				return nil, errors.Wrapf(exportErr, "Failed to export S3 site")
  1466  			}
  1467  		}
  1468  
  1469  		// PostMarshall Hook
  1470  		if ctx.userdata.workflowHooks != nil {
  1471  			postMarshallErr := callWorkflowHook("PostMarshall",
  1472  				ctx.userdata.workflowHooks.PostMarshall,
  1473  				ctx.userdata.workflowHooks.PostMarshalls,
  1474  				ctx)
  1475  			if nil != postMarshallErr {
  1476  				return nil, postMarshallErr
  1477  			}
  1478  		}
  1479  		// Last step, run the annotation steps to patch
  1480  		// up any references that depend on the entire
  1481  		// template being constructed
  1482  		_, annotateErr := annotateMaterializedTemplate(ctx.userdata.lambdaAWSInfos,
  1483  			ctx.context.cfTemplate,
  1484  			ctx.logger)
  1485  		if annotateErr != nil {
  1486  			return nil, errors.Wrapf(annotateErr,
  1487  				"Failed to perform final template annotations")
  1488  		}
  1489  
  1490  		// Run any user-supplied template validation hooks
  1491  		if ctx.userdata.workflowHooks != nil {
  1492  			validationErr := callValidationHooks(ctx.userdata.workflowHooks.Validators,
  1493  				ctx.context.cfTemplate,
  1494  				ctx)
  1495  			if validationErr != nil {
  1496  				return nil, validationErr
  1497  			}
  1498  		}
  1499  
  1500  		// Do the operation!
  1501  		return applyCloudFormationOperation(ctx)
  1502  	}
  1503  }
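
// The PreMarshall/PostMarshall hooks and Validators consulted above are
// supplied through the WorkflowHooks struct passed to Provision. A minimal
// sketch that only seeds the hook Context (keys and values are arbitrary
// user data made visible to each hook):
//
//	hooks := &WorkflowHooks{
//		Context: map[string]interface{}{
//			"buildStage": "dev",
//		},
//	}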
  1504  
  1505  // Provision compiles, packages, and provisions (either via create or update) a Sparta application.
  1506  // The serviceName is the service's logical
  1507  // identity and is used to determine create vs update operations.  The compilation options/flags are:
  1508  //
  1509  // 	TAGS:         -tags lambdabinary
  1510  // 	ENVIRONMENT:  GOOS=linux GOARCH=amd64
  1511  //
  1512  // The compiled binary is packaged with a NodeJS proxy shim to manage AWS Lambda setup & invocation per
  1513  // http://docs.aws.amazon.com/lambda/latest/dg/authoring-function-in-nodejs.html
  1514  //
  1515  // The two files are ZIP'd, posted to S3 and used as an input to a dynamically generated CloudFormation
  1516  // template (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html)
  1517  // which creates or updates the service state.
  1518  //
  1519  func Provision(noop bool,
  1520  	serviceName string,
  1521  	serviceDescription string,
  1522  	lambdaAWSInfos []*LambdaAWSInfo,
  1523  	api APIGateway,
  1524  	site *S3Site,
  1525  	s3Bucket string,
  1526  	useCGO bool,
  1527  	inPlaceUpdates bool,
  1528  	buildID string,
  1529  	codePipelineTrigger string,
  1530  	buildTags string,
  1531  	linkerFlags string,
  1532  	templateWriter io.Writer,
  1533  	workflowHooks *WorkflowHooks,
  1534  	logger *logrus.Logger) error {
  1535  
  1536  	err := validateSpartaPreconditions(lambdaAWSInfos, logger)
  1537  	if nil != err {
  1538  		return errors.Wrapf(err, "Failed to validate preconditions")
  1539  	}
  1540  	startTime := time.Now()
  1541  
  1542  	ctx := &workflowContext{
  1543  		logger: logger,
  1544  		userdata: userdata{
  1545  			noop:               noop,
  1546  			useCGO:             useCGO,
  1547  			inPlace:            inPlaceUpdates,
  1548  			buildID:            buildID,
  1549  			buildTags:          buildTags,
  1550  			linkFlags:          linkerFlags,
  1551  			serviceName:        serviceName,
  1552  			serviceDescription: serviceDescription,
  1553  			lambdaAWSInfos:     lambdaAWSInfos,
  1554  			api:                api,
  1555  			s3Bucket:           s3Bucket,
  1556  			s3SiteContext: &s3SiteContext{
  1557  				s3Site: site,
  1558  			},
  1559  			codePipelineTrigger: codePipelineTrigger,
  1560  			workflowHooks:       workflowHooks,
  1561  		},
  1562  		context: provisionContext{
  1563  			cfTemplate:                gocf.NewTemplate(),
  1564  			s3BucketVersioningEnabled: false,
  1565  			awsSession:                spartaAWS.NewSession(logger),
  1566  			workflowHooksContext:      make(map[string]interface{}),
  1567  			templateWriter:            templateWriter,
  1568  			binaryName:                SpartaBinaryName,
  1569  		},
  1570  		transaction: transaction{
  1571  			startTime: time.Now(),
  1572  		},
  1573  	}
  1574  	ctx.context.cfTemplate.Description = serviceDescription
  1575  
  1576  	// Update the context iff it exists
  1577  	if nil != workflowHooks && nil != workflowHooks.Context {
  1578  		for eachKey, eachValue := range workflowHooks.Context {
  1579  			ctx.context.workflowHooksContext[eachKey] = eachValue
  1580  		}
  1581  	}
  1582  
  1583  	ctx.logger.WithFields(logrus.Fields{
  1584  		"BuildID":             buildID,
  1585  		"NOOP":                noop,
  1586  		"Tags":                ctx.userdata.buildTags,
  1587  		"CodePipelineTrigger": ctx.userdata.codePipelineTrigger,
  1588  		"InPlaceUpdates":      ctx.userdata.inPlace,
  1589  	}).Info("Provisioning service")
  1590  
  1591  	if len(lambdaAWSInfos) == 0 {
  1592  		// Zero functions is permissible only when workflow hooks exist, since decorators alone may provision resources
  1593  		if ctx.userdata.workflowHooks == nil {
  1594  			return errors.New("No lambda functions provided to Sparta.Provision()")
  1595  		}
  1596  		ctx.logger.Warn("No lambda functions provided to Sparta.Provision()")
  1597  	}
  1598  
  1599  	// Start the workflow
  1600  	for step := verifyIAMRoles; step != nil; {
  1601  		next, err := step(ctx)
  1602  		if err != nil {
  1603  			showOptionalAWSUsageInfo(err, ctx.logger)
  1604  
  1605  			ctx.rollback()
  1606  			// Any step failure aborts the workflow after rollback
  1607  			return errors.Wrapf(err, "Failed to provision service")
  1608  		}
  1609  
  1610  		if next == nil {
  1611  			summaryLine := fmt.Sprintf("%s Summary", ctx.userdata.serviceName)
  1612  			ctx.logger.Info(headerDivider)
  1613  			ctx.logger.Info(summaryLine)
  1614  			ctx.logger.Info(headerDivider)
  1615  			for _, eachEntry := range ctx.transaction.stepDurations {
  1616  				ctx.logger.WithFields(logrus.Fields{
  1617  					"Duration (s)": fmt.Sprintf("%.f", eachEntry.duration.Seconds()),
  1618  				}).Info(eachEntry.name)
  1619  			}
  1620  			elapsed := time.Since(startTime)
  1621  			ctx.logger.WithFields(logrus.Fields{
  1622  				"Duration (s)": fmt.Sprintf("%.f", elapsed.Seconds()),
  1623  			}).Info("Total elapsed time")
  1624  			curTime := time.Now()
  1625  			ctx.logger.WithFields(logrus.Fields{
  1626  				"Time (UTC)":   curTime.UTC().Format(time.RFC3339),
  1627  				"Time (Local)": curTime.Format(time.RFC822),
  1628  			}).Info("Complete")
  1629  			break
  1630  		} else {
  1631  			step = next
  1632  		}
  1633  	}
  1634  	// When we're done, execute any finalizers
  1635  	if nil != ctx.transaction.finalizerFunctions {
  1636  		ctx.logger.WithFields(logrus.Fields{
  1637  			"FinalizerCount": len(ctx.transaction.finalizerFunctions),
  1638  		}).Debug("Invoking finalizer functions")
  1639  		for _, eachFinalizer := range ctx.transaction.finalizerFunctions {
  1640  			eachFinalizer(ctx.logger)
  1641  		}
  1642  	}
  1643  	return nil
  1644  }
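
// A minimal, hypothetical invocation of Provision: the bucket name is a
// placeholder and lambdaAWSInfos is assumed to have been assembled through
// the package's registration helpers beforehand:
//
//	logger := logrus.New()
//	provisionErr := Provision(false, // noop
//		"MyService",              // serviceName
//		"Example Sparta service", // serviceDescription
//		lambdaAWSInfos,           // previously registered functions
//		nil,                      // api
//		nil,                      // site
//		"my-code-bucket",         // s3Bucket
//		false,                    // useCGO
//		false,                    // inPlaceUpdates
//		"",                       // buildID (may be auto-generated when empty)
//		"",                       // codePipelineTrigger
//		"",                       // buildTags
//		"",                       // linkerFlags
//		nil,                      // templateWriter
//		nil,                      // workflowHooks
//		logger)
//	if provisionErr != nil {
//		logger.WithError(provisionErr).Fatal("Provisioning failed")
//	}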