github.com/Jeffail/benthos/v3@v3.65.0/lib/processor/aws_lambda.go

package processor

import (
	"fmt"
	"sync"
	"time"

	"github.com/Jeffail/benthos/v3/internal/docs"
	"github.com/Jeffail/benthos/v3/internal/tracing"
	"github.com/Jeffail/benthos/v3/lib/log"
	"github.com/Jeffail/benthos/v3/lib/message"
	"github.com/Jeffail/benthos/v3/lib/metrics"
	"github.com/Jeffail/benthos/v3/lib/types"
	"github.com/Jeffail/benthos/v3/lib/util/aws/lambda/client"
)

//------------------------------------------------------------------------------

func init() {
	Constructors[TypeAWSLambda] = TypeSpec{
		constructor: NewAWSLambda,
		Version:     "3.36.0",
		Categories: []Category{
			CategoryIntegration,
		},
		Summary: `
Invokes an AWS lambda for each message. The contents of the message are the
payload of the request, and the result of the invocation will become the new
contents of the message.`,
		Description: `
It is possible to perform requests per message of a batch in parallel by setting
the ` + "`parallel`" + ` flag to ` + "`true`" + `. The ` + "`rate_limit`" + `
field can be used to specify a rate limit [resource](/docs/components/rate_limits/about)
to cap the rate of requests across parallel components service-wide.

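For example, a rate limit resource (the name ` + "`foo_ratelimit`" + ` and the local rate limit used here are purely illustrative) might be applied like this:

` + "```yaml" + `
pipeline:
  processors:
    - aws_lambda:
        function: foo
        parallel: true
        rate_limit: foo_ratelimit

rate_limit_resources:
  - label: foo_ratelimit
    local:
      count: 10
      interval: 1s
` + "```" + `
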
In order to map or encode the payload to a specific request body, and map the
response back into the original payload instead of replacing it entirely, you
can use the ` + "[`branch` processor](/docs/components/processors/branch)" + `.

### Error Handling

When Benthos is unable to connect to the AWS endpoint, or is otherwise unable to invoke the target lambda function, it will retry the request according to the configured number of retries. Once these attempts have been exhausted the failed message will continue through the pipeline with its contents unchanged, but flagged as having failed, allowing you to use [standard processor error handling patterns](/docs/configuration/error_handling).

However, if the invocation of the function is successful but the function itself throws an error, then the message will have its contents updated with a JSON payload describing the reason for the failure, and a metadata field ` + "`lambda_function_error`" + ` will be added to the message, allowing you to detect and handle function errors with a ` + "[`branch`](/docs/components/processors/branch)" + `:

` + "```yaml" + `
pipeline:
  processors:
    - branch:
        processors:
          - aws_lambda:
              function: foo
        result_map: |
          root = if meta().exists("lambda_function_error") {
            throw("Invocation failed due to %v: %v".format(this.errorType, this.errorMessage))
          } else {
            this
          }
output:
  switch:
    retry_until_success: false
    cases:
      - check: errored()
        output:
          reject: ${! error() }
      - output:
          resource: somewhere_else
` + "```" + `

### Credentials

By default Benthos will use a shared credentials file when connecting to AWS
services. It's also possible to set credentials explicitly at the component level,
allowing you to transfer data across accounts. You can find out more
[in this document](/docs/guides/cloud/aws).`,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("parallel", "Whether messages of a batch should be dispatched in parallel."),
		}.Merge(client.FieldSpecs()),
		Examples: []docs.AnnotatedExample{
			{
				Title: "Branched Invoke",
				Summary: `
This example uses a ` + "[`branch` processor](/docs/components/processors/branch/)" + ` to map a new payload for triggering a lambda function with an ID and username from the original message. The result of the lambda is discarded, meaning the original message is left unchanged.`,
				Config: `
pipeline:
  processors:
    - branch:
        request_map: '{"id":this.doc.id,"username":this.user.name}'
        processors:
          - aws_lambda:
              function: trigger_user_update
`,
			},
		},
	}

	Constructors[TypeLambda] = TypeSpec{
		constructor: NewLambda,
		Status:      docs.StatusDeprecated,
		Categories: []Category{
			CategoryIntegration,
		},
		Summary: `
Invokes an AWS lambda for each message. The contents of the message are the
payload of the request, and the result of the invocation will become the new
contents of the message.`,
		Description: `
## Alternatives

This processor has been renamed to ` + "[`aws_lambda`](/docs/components/processors/aws_lambda)" + `.

It is possible to perform requests per message of a batch in parallel by setting
the ` + "`parallel`" + ` flag to ` + "`true`" + `. The ` + "`rate_limit`" + `
field can be used to specify a rate limit [resource](/docs/components/rate_limits/about)
to cap the rate of requests across parallel components service-wide.

In order to map or encode the payload to a specific request body, and map the
response back into the original payload instead of replacing it entirely, you
can use the ` + "[`branch` processor](/docs/components/processors/branch)" + `.

### Error Handling

When all retry attempts for a message are exhausted, the processor cancels the
attempt. These failed messages will continue through the pipeline unchanged, but
can be dropped or placed in a dead letter queue according to your config; you
can read about these patterns [here](/docs/configuration/error_handling).

### Credentials

By default Benthos will use a shared credentials file when connecting to AWS
services. It's also possible to set credentials explicitly at the component level,
allowing you to transfer data across accounts. You can find out more
[in this document](/docs/guides/cloud/aws).`,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("parallel", "Whether messages of a batch should be dispatched in parallel."),
		}.Merge(client.FieldSpecs()),
		Examples: []docs.AnnotatedExample{
			{
				Title: "Branched Invoke",
				Summary: `
This example uses a ` + "[`branch` processor](/docs/components/processors/branch/)" + ` to map a new payload for triggering a lambda function with an ID and username from the original message. The result of the lambda is discarded, meaning the original message is left unchanged.`,
				Config: `
pipeline:
  processors:
    - branch:
        request_map: '{"id":this.doc.id,"username":this.user.name}'
        processors:
          - lambda:
              function: trigger_user_update
`,
			},
		},
	}
}

//------------------------------------------------------------------------------

// LambdaConfig contains configuration fields for the Lambda processor.
type LambdaConfig struct {
	client.Config `json:",inline" yaml:",inline"`
	Parallel      bool `json:"parallel" yaml:"parallel"`
}

// NewLambdaConfig returns a LambdaConfig with default values.
func NewLambdaConfig() LambdaConfig {
	return LambdaConfig{
		Config:   client.NewConfig(),
		Parallel: false,
	}
}

//------------------------------------------------------------------------------

// Lambda is a processor that invokes an AWS Lambda using the message as the
// request body, and returns the response.
type Lambda struct {
	client *client.Type

	parallel bool

	conf  LambdaConfig
	log   log.Modular
	stats metrics.Type

	mCount     metrics.StatCounter
	mErrLambda metrics.StatCounter
	mErr       metrics.StatCounter
	mSent      metrics.StatCounter
	mBatchSent metrics.StatCounter
}

// NewAWSLambda returns a Lambda processor.
func NewAWSLambda(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	return newLambda(conf.AWSLambda, mgr, log, stats)
}

// NewLambda returns a Lambda processor.
func NewLambda(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	return newLambda(conf.Lambda, mgr, log, stats)
}

func newLambda(
	conf LambdaConfig, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	l := &Lambda{
		conf:  conf,
		log:   log,
		stats: stats,

		parallel: conf.Parallel,

		mCount:     stats.GetCounter("count"),
		mErrLambda: stats.GetCounter("error.lambda"),
		mErr:       stats.GetCounter("error"),
		mSent:      stats.GetCounter("sent"),
		mBatchSent: stats.GetCounter("batch.sent"),
	}
	var err error
	if l.client, err = client.New(
		conf.Config,
		client.OptSetLogger(l.log),
		// TODO: V4 Remove this
		client.OptSetStats(metrics.Namespaced(l.stats, "client")),
		client.OptSetManager(mgr),
	); err != nil {
		return nil, err
	}
	return l, nil
}

//------------------------------------------------------------------------------

// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
func (l *Lambda) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
	l.mCount.Incr(1)

	var resultMsg types.Message
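	// Serial path: when parallel dispatch is disabled, or the batch contains a
	// single message, parts are invoked one at a time and each part's contents
	// are replaced with the response payload.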
	if !l.parallel || msg.Len() == 1 {
		resultMsg = msg.Copy()
		IteratePartsWithSpanV2("aws_lambda", nil, resultMsg, func(i int, _ *tracing.Span, p types.Part) error {
			if err := l.client.InvokeV2(p); err != nil {
				l.mErr.Incr(1)
				l.mErrLambda.Incr(1)
				l.log.Errorf("Lambda function '%v' failed: %v\n", l.conf.Config.Function, err)
				return err
			}
			return nil
		})
	} else {
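		// Parallel path: copy each part up front so that failed invocations can
		// be flagged individually without aborting the rest of the batch.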
		parts := make([]types.Part, msg.Len())
		msg.Iter(func(i int, p types.Part) error {
			parts[i] = p.Copy()
			return nil
		})

		wg := sync.WaitGroup{}
		wg.Add(msg.Len())

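		// Dispatch one invocation per part; each goroutine writes its result
		// back by index so the ordering of the batch is preserved.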
		for i := 0; i < msg.Len(); i++ {
			go func(index int) {
				result, err := l.client.Invoke(message.Lock(msg, index))
				if err == nil && result.Len() != 1 {
					err = fmt.Errorf("unexpected response size: %v", result.Len())
				}
				if err != nil {
					l.mErr.Incr(1)
					l.mErrLambda.Incr(1)
					l.log.Errorf("Lambda parallel request to '%v' failed: %v\n", l.conf.Config.Function, err)
					FlagErr(parts[index], err)
				} else {
					parts[index] = result.Get(0)
				}

				wg.Done()
			}(i)
		}

		wg.Wait()
		resultMsg = message.New(nil)
		resultMsg.SetAll(parts)
	}

	msgs := [1]types.Message{resultMsg}

	l.mBatchSent.Incr(1)
	l.mSent.Incr(int64(resultMsg.Len()))
	return msgs[:], nil
}

// CloseAsync shuts down the processor and stops processing requests.
func (l *Lambda) CloseAsync() {
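	// Nothing to do: the processor does not run any background goroutines.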
}

// WaitForClose blocks until the processor has closed down.
func (l *Lambda) WaitForClose(timeout time.Duration) error {
	return nil
}

//------------------------------------------------------------------------------