github.com/Jeffail/benthos/v3@v3.65.0/lib/output/redis_hash.go (about)

     1  package output
     2  
     3  import (
     4  	"github.com/Jeffail/benthos/v3/internal/docs"
     5  	"github.com/Jeffail/benthos/v3/internal/impl/redis"
     6  	"github.com/Jeffail/benthos/v3/lib/log"
     7  	"github.com/Jeffail/benthos/v3/lib/metrics"
     8  	"github.com/Jeffail/benthos/v3/lib/output/writer"
     9  	"github.com/Jeffail/benthos/v3/lib/types"
    10  )
    11  
    12  //------------------------------------------------------------------------------
    13  
// init registers the redis_hash output under TypeRedisHash in the global
// Constructors table so it can be instantiated from configuration. The
// TypeSpec bundles the constructor with the user-facing documentation and
// the configuration field specs rendered on the docs site.
func init() {
	Constructors[TypeRedisHash] = TypeSpec{
		// fromSimpleConstructor adapts NewRedisHash to the plugin
		// constructor signature expected by the registry.
		constructor: fromSimpleConstructor(NewRedisHash),
		Summary: `
Sets Redis hash objects using the HMSET command.`,
		Description: `
The field ` + "`key`" + ` supports
[interpolation functions](/docs/configuration/interpolation#bloblang-queries), allowing
you to create a unique key for each message.

The field ` + "`fields`" + ` allows you to specify an explicit map of field
names to interpolated values, also evaluated per message of a batch:

` + "```yaml" + `
output:
  redis_hash:
    url: tcp://localhost:6379
    key: ${!json("id")}
    fields:
      topic: ${!meta("kafka_topic")}
      partition: ${!meta("kafka_partition")}
      content: ${!json("document.text")}
` + "```" + `

If the field ` + "`walk_metadata`" + ` is set to ` + "`true`" + ` then Benthos
will walk all metadata fields of messages and add them to the list of hash
fields to set.

If the field ` + "`walk_json_object`" + ` is set to ` + "`true`" + ` then
Benthos will walk each message as a JSON object, extracting keys and the string
representation of their value and adds them to the list of hash fields to set.

The order of hash field extraction is as follows:

1. Metadata (if enabled)
2. JSON object (if enabled)
3. Explicit fields

Where latter stages will overwrite matching field names of a former stage.`,
		// Async marks this output as supporting asynchronous writes
		// (see the NewAsyncWriter wrapping in NewRedisHash).
		Async: true,
		// Shared Redis connection fields (url, tls, etc.) come from the
		// common redis config docs; plugin-specific fields are appended.
		FieldSpecs: redis.ConfigDocs().Add(
			docs.FieldCommon(
				"key", "The key for each message, function interpolations should be used to create a unique key per message.",
				"${!meta(\"kafka_key\")}", "${!json(\"doc.id\")}", "${!count(\"msgs\")}",
			).IsInterpolated(),
			docs.FieldCommon("walk_metadata", "Whether all metadata fields of messages should be walked and added to the list of hash fields to set."),
			docs.FieldCommon("walk_json_object", "Whether to walk each message as a JSON object and add each key/value pair to the list of hash fields to set."),
			docs.FieldString("fields", "A map of key/value pairs to set as hash fields.").IsInterpolated().Map(),
			docs.FieldCommon("max_in_flight", "The maximum number of messages to have in flight at a given time. Increase this to improve throughput."),
		),
		Categories: []Category{
			CategoryServices,
		},
	}
}
    69  
    70  //------------------------------------------------------------------------------
    71  
    72  // NewRedisHash creates a new RedisHash output type.
    73  func NewRedisHash(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
    74  	rhash, err := writer.NewRedisHashV2(conf.RedisHash, mgr, log, stats)
    75  	if err != nil {
    76  		return nil, err
    77  	}
    78  	a, err := NewAsyncWriter(
    79  		TypeRedisHash, conf.RedisHash.MaxInFlight, rhash, log, stats,
    80  	)
    81  	if err != nil {
    82  		return nil, err
    83  	}
    84  	return OnlySinglePayloads(a), nil
    85  }
    86  
    87  //------------------------------------------------------------------------------