github.com/Jeffail/benthos/v3@v3.65.0/lib/input/hdfs.go

package input

import (
	"errors"

	"github.com/Jeffail/benthos/v3/internal/docs"
	"github.com/Jeffail/benthos/v3/lib/input/reader"
	"github.com/Jeffail/benthos/v3/lib/log"
	"github.com/Jeffail/benthos/v3/lib/metrics"
	"github.com/Jeffail/benthos/v3/lib/types"
)

//------------------------------------------------------------------------------

func init() {
	Constructors[TypeHDFS] = TypeSpec{
		constructor: fromSimpleConstructor(NewHDFS),
		Summary: `
Reads files from an HDFS directory, where each discrete file will be consumed as
a single message payload.`,
		Description: `
### Metadata

This input adds the following metadata fields to each message:

` + "``` text" + `
- hdfs_name
- hdfs_path
` + "```" + `

You can access these metadata fields using
[function interpolation](/docs/configuration/interpolation#metadata).`,
		Categories: []Category{
			CategoryServices,
		},
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("hosts", "A list of target host addresses to connect to.").Array(),
			docs.FieldCommon("user", "A user ID to connect as."),
			docs.FieldCommon("directory", "The directory to consume from."),
		},
	}
}
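
// The FieldSpecs above correspond to the HDFS sub-config of the input Config.
// As a rough sketch (not part of the original file), the input could be
// configured programmatically along these lines; the Hosts, User and Directory
// field names are assumed to mirror the "hosts", "user" and "directory" fields
// documented above, and the host address is purely illustrative.
func exampleHDFSConfig() Config {
	conf := NewConfig()
	conf.Type = TypeHDFS
	conf.HDFS.Hosts = []string{"localhost:9000"} // hypothetical namenode address
	conf.HDFS.User = "benthos"
	conf.HDFS.Directory = "/incoming"
	return conf
}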

//------------------------------------------------------------------------------

// NewHDFS creates a new HDFS input type.
func NewHDFS(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	if conf.HDFS.Directory == "" {
		return nil, errors.New("invalid directory (cannot be empty)")
	}
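	// The HDFS reader is wrapped in an AsyncPreserver, which keeps a record of
	// messages that have been sent but not yet acknowledged and resends them
	// after a disconnect, so in-flight data is not lost across reconnects.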
	return NewAsyncReader(
		TypeHDFS,
		true,
		reader.NewAsyncPreserver(
			reader.NewHDFS(conf.HDFS, log, stats),
		),
		log, stats,
	)
}
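
// A hedged usage sketch (not part of the original file): the input would
// normally be built through the package-level New constructor, which
// dispatches via the Constructors map to NewHDFS, and then drained through its
// transaction channel. The no-op manager, logger and metrics helpers, the
// response package, and exampleHDFSConfig are assumptions for illustration.
//
//	conf := exampleHDFSConfig()
//	in, err := New(conf, types.NoopMgr(), log.Noop(), metrics.Noop())
//	if err != nil {
//		panic(err)
//	}
//	for tran := range in.TransactionChan() {
//		_ = tran.Payload.Iter(func(_ int, p types.Part) error {
//			fmt.Println(p.Metadata().Get("hdfs_path"))
//			return nil
//		})
//		tran.ResponseChan <- response.NewAck()
//	}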

//------------------------------------------------------------------------------