github.com/muhammedhassanm/blockchain@v0.0.0-20200120143007-697261defd4d/sawtooth-core-master/docker/telegraf.conf

# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
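#
# For example, the commented lines below illustrate the substitution;
# INFLUX_URL and BATCH_SIZE are hypothetical variables assumed to be set in
# telegraf's environment, not values shipped with this config:
#   urls = ["$INFLUX_URL"]           # string: must be quoted
#   metric_batch_size = $BATCH_SIZE  # number: left unquoted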


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  ## This buffer only fills when writes fail to output plugin(s).
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
  ## Run telegraf with debug log messages.
  debug = false
  ## Run telegraf in quiet mode (error log messages only).
  quiet = false
  ## Specify the log file name. The empty string means to log to stderr.
  logfile = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  ## The HTTP or UDP URL for your InfluxDB instance.  Each item should be
  ## of the form:
  ##   scheme "://" host [ ":" port]
  ##
  ## Multiple urls can be specified as part of the same cluster,
  ## this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://influxdb:8086"] # required
  ## The target database for metrics (telegraf will create it if it does not exist).
  database = "metrics" # required

  ## Name of existing retention policy to write to.  Empty string writes to
  ## the default retention policy.
  retention_policy = ""
  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
  write_consistency = "any"

  ## Write timeout (for the InfluxDB client), formatted as a string.
  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
  timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  ## Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
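
# A minimal authenticated HTTPS variant of the output above, left commented
# out as a sketch; the host name and credentials are placeholders, not values
# used by this deployment:
# [[outputs.influxdb]]
#   urls = ["https://influxdb.example.com:8086"]
#   database = "metrics"
#   username = "telegraf"
#   password = "example-password"
#   ssl_ca = "/etc/telegraf/ca.pem"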


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Configuration for the AMQP server to send metrics to
# [[outputs.amqp]]
#   ## AMQP url
#   url = "amqp://localhost:5672/influxdb"
#   ## AMQP exchange
#   exchange = "telegraf"
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#   ## Telegraf tag to use as a routing key
#   ##  ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## InfluxDB retention policy
#   # retention_policy = "default"
#   ## InfluxDB database
#   # database = "telegraf"
#
#   ## Write timeout, formatted as a string.  If not provided, will default
#   ## to 5s. 0s means no timeout (not recommended).
#   # timeout = "5s"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
#   ## Amazon REGION
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Namespace for the CloudWatch MetricDatums
#   namespace = "InfluxData/Telegraf"


# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Send metrics to nowhere at all
# [[outputs.discard]]
#   # no configuration


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
#   ## The full HTTP endpoint URL for your Elasticsearch instance
#   ## Multiple urls can be specified as part of the same cluster,
#   ## this means that only ONE of the urls will be written to each interval.
#   urls = [ "http://node1.es.example.com:9200" ] # required.
#   ## Elasticsearch client timeout, defaults to "5s" if not set.
#   timeout = "5s"
#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
#   ## thus it is not necessary to list all nodes in the urls config option.
#   enable_sniffer = false
#   ## Set the interval to check if the Elasticsearch nodes are available
#   ## Setting to "0s" will disable the health check (not recommended in production)
#   health_check_interval = "10s"
#   ## HTTP basic authentication details (eg. when using Shield)
#   # username = "telegraf"
#   # password = "mypassword"
#
#   ## Index Config
#   ## The target index for metrics (Elasticsearch will create it if it does not exist).
#   ## You can use the date specifiers below to create indexes per time frame.
#   ## The metric timestamp will be used to decide the destination index name
#   # %Y - year (2016)
#   # %y - last two digits of year (00..99)
#   # %m - month (01..12)
#   # %d - day of month (e.g., 01)
#   # %H - hour (00..23)
#   index_name = "telegraf-%Y.%m.%d" # required.
#
#   ## Template Config
#   ## Set to true if you want telegraf to manage its index template.
#   ## If enabled it will create a recommended index template for telegraf indexes
#   manage_template = true
#   ## The template name used for telegraf indexes
#   template_name = "telegraf"
#   ## Set to true if you want telegraf to overwrite an existing template
#   overwrite_template = false


# # Send telegraf metrics to file(s)
# [[outputs.file]]
#   ## Files to write to, "stdout" is a specially handled file.
#   files = ["stdout", "/tmp/metrics.out"]
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
#   ## TCP endpoint for your graphite instance.
#   ## If multiple endpoints are configured, output will be load balanced.
#   ## Only one of the endpoints will be written to with each iteration.
#   servers = ["localhost:2003"]
#   ## Prefix metrics name
#   prefix = ""
#   ## Graphite output template
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   template = "host.tags.measurement.field"
#   ## timeout in seconds for the write connection to graphite
#   timeout = 2


# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
#   ## UDP endpoint for your graylog instance.
#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]


# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display communication to Instrumental
#   debug = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
#   ## URLs of kafka brokers
#   brokers = ["localhost:9092"]
#   ## Kafka topic for producer messages
#   topic = "telegraf"
#   ## Telegraf tag to use as a routing key
#   ##  ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## CompressionCodec represents the various compression codecs recognized by
#   ## Kafka in messages.
#   ##  0 : No compression
#   ##  1 : Gzip compression
#   ##  2 : Snappy compression
#   compression_codec = 0
#
#   ##  RequiredAcks is used in Produce Requests to tell the broker how many
#   ##  replica acknowledgements it must see before responding
#   ##   0 : the producer never waits for an acknowledgement from the broker.
#   ##       This option provides the lowest latency but the weakest durability
#   ##       guarantees (some data will be lost when a server fails).
#   ##   1 : the producer gets an acknowledgement after the leader replica has
#   ##       received the data. This option provides better durability as the
#   ##       client waits until the server acknowledges the request as successful
#   ##       (only messages that were written to the now-dead leader but not yet
#   ##       replicated will be lost).
#   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
#   ##       received the data. This option provides the best durability, we
#   ##       guarantee that no messages will be lost as long as at least one in
#   ##       sync replica remains.
#   required_acks = -1
#
#   ##  The total number of times to retry sending a message
#   max_retry = 3
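#
#   ## For example, a lower-latency variant of the two settings above
#   ## (illustrative only, not a recommendation): Snappy compression with
#   ## leader-only acknowledgement.
#   # compression_codec = 2
#   # required_acks = 1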
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#   ## PartitionKey as used for sharding data.
#   partitionkey = "PartitionKey"
#   ## If set, the partition key will be a random UUID on every put.
#   ## This allows for scaling across multiple shards in a stream.
#   ## This will cause issues with ordering.
#   use_random_partitionkey = false
#
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"
#
#   ## debug will show upstream aws messages.
#   debug = false


# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
#   ## Librato API Docs
#   ## http://dev.librato.com/v1/metrics-authentication
#   ## Librato API user
#   api_user = "telegraf@influxdb.com" # required.
#   ## Librato API token
#   api_token = "my-secret-token" # required.
#   ## Debug
#   # debug = false
#   ## Connection timeout.
#   # timeout = "5s"
#   ## Output source Template (same as graphite buckets)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   ## This template is used in librato's source (not metric's name)
#   template = "host"
#


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
#   servers = ["localhost:1883"] # required.
#
#   ## MQTT outputs send metrics to this topic format
#   ##    "<topic_prefix>/<hostname>/<pluginname>/"
#   ##   ex: prefix/web01.example.com/mem
#   topic_prefix = "telegraf"
#
#   ## Username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## client ID, if not set a random ID is generated
#   # client_id = ""
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NATS
# [[outputs.nats]]
#   ## URLs of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Optional credentials
#   # username = ""
#   # password = ""
#   ## NATS subject for producer messages
#   subject = "telegraf"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
#   ## Location of nsqd instance listening on TCP
#   server = "localhost:4150"
#   ## NSQ topic for producer messages
#   topic = "telegraf"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
#   ## prefix for metrics keys
#   prefix = "my.specific.prefix."
#
#   ## DNS name of the OpenTSDB server
#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
#   ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
#   host = "opentsdb.example.com"
#
#   ## Port of the OpenTSDB server
#   port = 4242
#
#   ## Number of data points to send to OpenTSDB in HTTP requests.
#   ## Not used with telnet API.
#   httpBatchSize = 50
#
#   ## Debug true - Prints OpenTSDB communication
#   debug = false


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
#   ## Address to listen on
#   # listen = ":9126"
#
#   ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
#   # expiration_interval = "60s"
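#
#   ## Once enabled, collected metrics are served over HTTP for Prometheus to
#   ## scrape; with the default listen address above that would be, e.g.:
#   ##   curl http://localhost:9126/metrics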


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
#   ## The full TCP or UDP URL of the Riemann server
#   url = "tcp://localhost:5555"
#
#   ## Riemann event TTL, floating-point time in seconds.
#   ## Defines how long an event is considered valid in Riemann
#   # ttl = 30.0
#
#   ## Separator to use between measurement and field name in Riemann service name
#   ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
#   separator = "/"
#
#   ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
#   # measurement_as_attribute = false
#
#   ## Send string metrics as Riemann event states.
#   ## Unless enabled all string metrics will be ignored
#   # string_as_state = false
#
#   ## A list of tag keys whose values get sent as Riemann tags.
#   ## If empty, all Telegraf tag values will be sent as tags
#   # tag_keys = ["telegraf","custom_tag"]
#
#   ## Additional Riemann tags to send.
#   # tags = ["telegraf-output"]
#
#   ## Description for Riemann event
#   # description_text = "metrics collected from telegraf"
#
#   ## Riemann client write timeout, defaults to "5s" if not set.
#   # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
#   ## URL of server
#   url = "localhost:5555"
#   ## transport protocol to use, either tcp or udp
#   transport = "tcp"
#   ## separator to use between input name and field name in Riemann service name
#   separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
#   ## URL to connect to
#   # address = "tcp://127.0.0.1:8094"
#   # address = "tcp://example.com:http"
#   # address = "tcp4://127.0.0.1:8094"
#   # address = "tcp6://127.0.0.1:8094"
#   # address = "tcp6://[2001:db8::1]:8094"
#   # address = "udp://127.0.0.1:8094"
#   # address = "udp4://127.0.0.1:8094"
#   # address = "udp6://127.0.0.1:8094"
#   # address = "unix:///tmp/telegraf.sock"
#   # address = "unixgram:///tmp/telegraf.sock"
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to generate.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"



###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

# # Print all metrics that pass through this filter.
# [[processors.printer]]



###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
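#
#   ## For example, a metric with a field "usage_idle" passing through this
#   ## aggregator would yield "usage_idle_min" and "usage_idle_max" fields
#   ## each period (suffix behavior per the minmax plugin's documentation).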



###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false


# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default, telegraf gathers stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  # mount_points = ["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]


# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the form of
  ## tags.
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also customize the
  ## name of the device via templates.
  ## The 'name_templates' parameter is a list of templates to try and apply to
  ## the device. The template may contain variables in the form of '$PROPERTY' or
  ## '${PROPERTY}'. The first template which does not contain any variables not
  ## present for the device is used as the device name tag.
  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  ## the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration


# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
#   ## Aerospike servers to connect to (with port)
#   ## This plugin will query all namespaces the aerospike
#   ## server has configured and get stats for them.
#   servers = ["localhost:3000"]


# # Read Apache status information (mod_status)
# [[inputs.apache]]
#   ## An array of Apache status URIs to gather stats from.
#   ## Default is "http://localhost/server-status?auto".
#   urls = ["http://localhost/server-status?auto"]
#   ## user credentials for basic HTTP authentication
#   username = "myuser"
#   password = "mypassword"
#
#   ## Timeout for the complete connection and response time in seconds
#   response_timeout = "25s" ## defaults to 5 seconds
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
#   ## Bcache sets path
#   ## If not specified, then default is:
#   bcachePath = "/sys/fs/bcache"
#
#   ## By default, telegraf gathers stats for all bcache devices
#   ## Setting devices will restrict the stats to the specified
#   ## bcache devices.
#   bcacheDevs = ["bcache0"]


# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
#   # This is the context root used to compose the jolokia url
#   context = "/jolokia/read"
#   ## List of cassandra servers exposing jolokia read service
#   servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
#   ## List of metrics collected on above servers
#   ## Each metric consists of a jmx path.
#   ## This will collect all heap memory usage metrics from the jvm and
#   ## ReadLatency metrics for all keyspaces and tables.
   730  #   ## "type=Table" in the query works with Cassandra3.0. Older versions might
#   ## need to use "type=ColumnFamily"
#   metrics  = [
#     "/java.lang:type=Memory/HeapMemoryUsage",
#     "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
#   ]


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
#   ## This is the recommended interval to poll.  Too frequent and you will lose
#   ## data points due to timeouts during rebalancing and recovery
#   interval = '1m'
#
#   ## All configuration values are optional, defaults are shown below
#
#   ## location of ceph binary
#   ceph_binary = "/usr/bin/ceph"
#
#   ## directory in which to look for socket files
#   socket_dir = "/var/run/ceph"
#
#   ## prefix of MON and OSD socket files, used to determine socket type
#   mon_prefix = "ceph-mon"
#   osd_prefix = "ceph-osd"
#
#   ## suffix used to identify socket files
#   socket_suffix = "asok"
#
#   ## Ceph user to authenticate as
#   ceph_user = "client.admin"
#
#   ## Ceph configuration to use to locate the cluster
#   ceph_config = "/etc/ceph/ceph.conf"
#
#   ## Whether to gather statistics via the admin socket
#   gather_admin_socket_stats = true
#
#   ## Whether to gather statistics via ceph commands
#   gather_cluster_stats = false


# # Read specific statistics per cgroup
# [[inputs.cgroup]]
#   ## Directories in which to look for files, globs are supported.
#   ## Consider restricting paths to the set of cgroups you really
#   ## want to monitor if you have a large number of cgroups, to avoid
#   ## any cardinality issues.
#   # paths = [
#   #   "/cgroup/memory",
#   #   "/cgroup/memory/child1",
#   #   "/cgroup/memory/child2/*",
#   # ]
#   ## cgroup stat fields, as file names, globs are supported.
#   ## these file names are appended to each path from above.
#   # files = ["memory.*usage*", "memory.limit_in_bytes"]


# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
#   ## If true, chronyc tries to perform a DNS lookup for the time server.
#   # dns_lookup = false


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
#   ## Amazon Region
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
#   # metrics are made available to the 1 minute period. Some are collected at
#   # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
#   # Note that if a period is configured that is smaller than the minimum for a
#   # particular metric, that metric will not be returned by the Cloudwatch API
#   # and will not be collected by Telegraf.
#   #
#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
#   period = "5m"
#
#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
#   delay = "5m"
#
#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
#   ## gaps or overlap in pulled data
#   interval = "5m"
#
#   ## Configure the TTL for the internal cache of metrics.
#   ## Defaults to 1 hr if not specified
#   #cache_ttl = "10m"
#
#   ## Metric Statistic Namespace (required)
#   namespace = "AWS/ELB"
#
#   ## Maximum requests per second. Note that the global default AWS rate limit is
#   ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
#   ## maximum of 400. Optional - default value is 200.
#   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
#   ratelimit = 200
#
#   ## Metrics to Pull (optional)
#   ## Defaults to all Metrics in Namespace if nothing is provided
#   ## Refreshes Namespace available metrics every 1h
#   #[[inputs.cloudwatch.metrics]]
#   #  names = ["Latency", "RequestCount"]
#   #
#   #  ## Dimension filters for Metric (optional)
#   #  [[inputs.cloudwatch.metrics.dimensions]]
#   #    name = "LoadBalancerName"
#   #    value = "p-example"


# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
#    ## The following defaults would work with multiple versions of conntrack.
#    ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
#    ## kernel versions, as are the directory locations.
#
#    ## Superset of filenames to look for within the conntrack dirs.
#    ## Missing files will be ignored.
#    files = ["ip_conntrack_count","ip_conntrack_max",
#             "nf_conntrack_count","nf_conntrack_max"]
#
#    ## Directories to search within for the conntrack files above.
#    ## Missing directories will be ignored.
#    dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]


# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
#   ## Most of these values default to the ones configured at the Consul agent level.
#   ## Optional Consul server address (default: "localhost")
#   # address = "localhost"
#   ## Optional URI scheme for the Consul server (default: "http")
#   # scheme = "http"
#   ## Optional ACL token used in every request (default: "")
#   # token = ""
#   ## Optional username used for request HTTP Basic Authentication (default: "")
#   # username = ""
#   ## Optional password used for HTTP Basic Authentication (default: "")
#   # password = ""
#   ## Optional data centre to query the health checks from (default: "")
#   # datacentre = ""


# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
#   ## specify servers via a url matching:
#   ##  [protocol://][:password]@address[:port]
#   ##  e.g.
#   ##    http://couchbase-0.example.com/
#   ##    http://admin:secret@couchbase-0.example.com:8091/
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no protocol is specified, HTTP is used.
#   ## If no port is specified, 8091 is used.
#   servers = ["http://localhost:8091"]


# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
#   ## Works with CouchDB stats endpoints out of the box
#   ## Multiple HOSTs from which to read CouchDB stats:
#   hosts = ["http://localhost:8086/_stats"]


# # Read metrics from one or many disque servers
# [[inputs.disque]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password.
#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost"]


# # Provide a native collection for dmsetup based statistics for dm-cache
# [[inputs.dmcache]]
#   ## Whether to report per-device stats or not
#   per_device = true


# # Query a given DNS server and gather statistics
# [[inputs.dns_query]]
#   ## servers to query
#   servers = ["8.8.8.8"] # required
#
#   ## Domains or subdomains to query. "." (root) is default
#   domains = ["."] # optional
#
#   ## Query record type. Default is "A"
#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
#   record_type = "A" # optional
#
#   ## DNS server port. 53 is default
#   port = 53 # optional
#
#   ## Query timeout in seconds. Default is 2 seconds
#   timeout = 2 # optional


# # Read metrics about docker containers
# [[inputs.docker]]
#   ## Docker Endpoint
#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
#   endpoint = "unix:///var/run/docker.sock"
#   ## Only collect metrics for these containers, collect all if empty
#   container_names = []
#   ## Timeout for docker list, info, and stats commands
#   timeout = "5s"
#
#   ## Whether to report for each container per-device blkio (8:0, 8:1...) and
#   ## network (eth0, eth1, ...) stats or not
#   perdevice = true
#   ## Whether to report for each container total blkio and network stats or not
#   total = false
#
#   ## docker labels to include and exclude as tags.  Globs accepted.
#   ## Note that an empty array for both will include all labels as tags
#   docker_label_include = []
#   docker_label_exclude = []


# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
#   ## specify dovecot servers via an address:port list
#   ##  e.g.
#   ##    localhost:24242
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost:24242"]
#   ## Type is one of "user", "domain", "ip", or "global"
#   type = "global"
#   ## Wildcard matches like "*.com". An empty string "" is same as "*"
#   ## If type = "ip" filters should be <IP/network>
#   filters = [""]


# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
#   ## specify a list of one or more Elasticsearch servers
#   # you can add username and password to your url to use basic authentication:
#   # servers = ["http://user:pass@localhost:9200"]
#   servers = ["http://localhost:9200"]
#
#   ## Timeout for HTTP requests to the elastic search server(s)
#   http_timeout = "5s"
#
#   ## When local is true (the default), the node will read only its own stats.
#   ## Set local to false when you want to read the node stats from all nodes
#   ## of the cluster.
#   local = true
#
#   ## Set cluster_health to true when you want to also obtain cluster health stats
#   cluster_health = false
#
#   ## Set cluster_stats to true when you want to also obtain cluster stats from the
#   ## Master node.
#   cluster_stats = false
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
#   ## Commands array
#   commands = [
#     "/tmp/test.sh",
#     "/usr/bin/mycollector --foo=bar",
#     "/tmp/collect_*.sh"
#   ]
#
#   ## Timeout for each command to complete.
#   timeout = "5s"
#
#   ## measurement name suffix (for separating different commands)
#   name_suffix = "_mycollector"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
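#
#   ## With data_format = "influx", each command is expected to print InfluxDB
#   ## line protocol to stdout. For instance, a hypothetical /tmp/test.sh
#   ## (illustrative only, not part of this repo) could be:
#   ##   #!/bin/sh
#   ##   echo "my_metric,host=example value=42i"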


# # Read stats about given file(s)
# [[inputs.filestat]]
#   ## Files to gather stats about.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/log/**.log"]
#   ## If true, read the entire file and calculate an md5 checksum.
#   md5 = false


# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
#   ## API endpoint, currently supported API:
#   ##
#   ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
#   ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
#   ##
#   ## For namespace endpoint, the metrics array will be ignored for that call.
#   ## Endpoint can contain namespace and multiple type calls.
#   ##
#   ## Please check http://[graylog-server-ip]:12900/api-browser for full list
#   ## of endpoints
#   servers = [
#     "http://[graylog-server-ip]:12900/system/metrics/multiple",
#   ]
#
#   ## Metrics list
#   ## List of metrics can be found on Graylog webservice documentation.
#   ## Or by hitting the web service API at:
#   ##   http://[graylog-host]:12900/system/metrics
#   metrics = [
#     "jvm.cl.loaded",
#     "jvm.memory.pools.Metaspace.committed"
#   ]
#
#   ## Username and password
#   username = ""
#   password = ""
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
#   ## Make sure you specify the complete path to the stats endpoint
#   ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
#
#   ## If no servers are specified, then the default is 127.0.0.1:1936/haproxy?stats
#   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
#
#   ## You can also use local socket with standard wildcard globbing.
#   ## Server address not starting with 'http' will be treated as a possible
#   ## socket, so both examples below are valid.
#   # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
#
#   ## By default, some of the fields are renamed from what haproxy calls them.
#   ## Setting this option to true results in the plugin keeping the original
#   ## field names.
#   # keep_field_names = true
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Monitor disks' temperatures using hddtemp
# [[inputs.hddtemp]]
#   ## By default, telegraf gathers temperature data from all disks detected
#   ## by hddtemp.
#   ##
#   ## Only collect temps from the selected disks.
#   ##
#   ## A * as the device name will return the temperature values of all disks.
#   ##
#   # address = "127.0.0.1:7634"
#   # devices = ["sda", "*"]


# # HTTP/HTTPS request given an address, a method and a timeout
  1129  # [[inputs.http_response]]
  1130  #   ## Server address (default http://localhost)
  1131  #   address = "http://github.com"
  1132  #   ## Set response_timeout (default 5 seconds)
  1133  #   response_timeout = "5s"
  1134  #   ## HTTP Request Method
  1135  #   method = "GET"
  1136  #   ## Whether to follow redirects from the server (defaults to false)
  1137  #   follow_redirects = true
  1138  #   ## HTTP Request Headers (all values must be strings)
  1139  #   # [inputs.http_response.headers]
  1140  #   #   Host = "github.com"
  1141  #   ## Optional HTTP Request Body
  1142  #   # body = '''
  1143  #   # {'fake':'data'}
  1144  #   # '''
  1145  #
  1146  #   ## Optional substring or regex match in body of the response
  1147  #   ## response_string_match = "\"service_status\": \"up\""
  1148  #   ## response_string_match = "ok"
  1149  #   ## response_string_match = "\".*_status\".?:.?\"up\""
  1150  #
  1151  #   ## Optional SSL Config
  1152  #   # ssl_ca = "/etc/telegraf/ca.pem"
  1153  #   # ssl_cert = "/etc/telegraf/cert.pem"
  1154  #   # ssl_key = "/etc/telegraf/key.pem"
  1155  #   ## Use SSL but skip chain & host verification
  1156  #   # insecure_skip_verify = false
  1157  
  1158  
  1159  # # Read flattened metrics from one or more JSON HTTP endpoints
  1160  # [[inputs.httpjson]]
  1161  #   ## NOTE This plugin only reads numerical measurements, strings and booleans
  1162  #   ## will be ignored.
  1163  #
  1164  #   ## Name for the service being polled.  Will be appended to the name of the
  1165  #   ## measurement e.g. httpjson_webserver_stats
  1166  #   ##
  1167  #   ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
  1168  #   name = "webserver_stats"
  1169  #
  1170  #   ## URL of each server in the service's cluster
  1171  #   servers = [
  1172  #     "http://localhost:9999/stats/",
  1173  #     "http://localhost:9998/stats/",
  1174  #   ]
  1175  #   ## Set response_timeout (default 5 seconds)
  1176  #   response_timeout = "5s"
  1177  #
  1178  #   ## HTTP method to use: GET or POST (case-sensitive)
  1179  #   method = "GET"
  1180  #
  1181  #   ## List of tag names to extract from top-level of JSON server response
  1182  #   # tag_keys = [
  1183  #   #   "my_tag_1",
  1184  #   #   "my_tag_2"
  1185  #   # ]
  1186  #
  1187  #   ## HTTP parameters (all values must be strings).  For "GET" requests, data
  1188  #   ## will be included in the query.  For "POST" requests, data will be included
  1189  #   ## in the request body as "x-www-form-urlencoded".
  1190  #   # [inputs.httpjson.parameters]
  1191  #   #   event_type = "cpu_spike"
  1192  #   #   threshold = "0.75"
  1193  #
  1194  #   ## HTTP Headers (all values must be strings)
  1195  #   # [inputs.httpjson.headers]
  1196  #   #   X-Auth-Token = "my-xauth-token"
  1197  #   #   apiVersion = "v1"
  1198  #
  1199  #   ## Optional SSL Config
  1200  #   # ssl_ca = "/etc/telegraf/ca.pem"
  1201  #   # ssl_cert = "/etc/telegraf/cert.pem"
  1202  #   # ssl_key = "/etc/telegraf/key.pem"
  1203  #   ## Use SSL but skip chain & host verification
  1204  #   # insecure_skip_verify = false
  1205  
  1206  
  1207  # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
  1208  # [[inputs.influxdb]]
  1209  #   ## Works with InfluxDB debug endpoints out of the box,
  1210  #   ## but other services can use this format too.
  1211  #   ## See the influxdb plugin's README for more details.
  1212  #
  1213  #   ## Multiple URLs from which to read InfluxDB-formatted JSON
  1214  #   ## Default is "http://localhost:8086/debug/vars".
  1215  #   urls = [
  1216  #     "http://localhost:8086/debug/vars"
  1217  #   ]
  1218  #
  1219  #   ## http request & header timeout
  1220  #   timeout = "5s"
  1221  
  1222  
  1223  # # Collect statistics about itself
  1224  # [[inputs.internal]]
  1225  #   ## If true, collect telegraf memory stats.
  1226  #   # collect_memstats = true
  1227  
  1228  
  1229  # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
  1230  # [[inputs.interrupts]]
  1231  #   ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
  1232  #   # [inputs.interrupts.tagdrop]
  1233  #     # irq = [ "NET_RX", "TASKLET" ]
  1234  
  1235  
  1236  # # Read metrics from the bare metal servers via IPMI
  1237  # [[inputs.ipmi_sensor]]
  1238  #   ## optionally specify the path to the ipmitool executable
  1239  #   # path = "/usr/bin/ipmitool"
  1240  #   #
  1241  #   ## optionally specify one or more servers via a url matching
  1242  #   ##  [username[:password]@][protocol[(address)]]
  1243  #   ##  e.g.
  1244  #   ##    root:passwd@lan(127.0.0.1)
  1245  #   ##
  1246  #   ## if no servers are specified, local machine sensor stats will be queried
  1247  #   ##
  1248  #   # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
  1249  
  1250  
  1251  # # Gather packets and bytes throughput from iptables
  1252  # [[inputs.iptables]]
  1253  #   ## iptables require root access on most systems.
  1254  #   ## Setting 'use_sudo' to true will make use of sudo to run iptables.
  1255  #   ## Users must configure sudo to allow telegraf user to run iptables with no password.
  1256  #   ## iptables can be restricted to only list command "iptables -nvL".
  1257  #   use_sudo = false
  1258  #   ## Setting 'use_lock' to true runs iptables with the "-w" option.
  1259  #   ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
  1260  #   use_lock = false
  1261  #   ## defines the table to monitor:
  1262  #   table = "filter"
  1263  #   ## defines the chains to monitor.
  1264  #   ## NOTE: iptables rules without a comment will not be monitored.
  1265  #   ## Read the plugin documentation for more information.
  1266  #   chains = [ "INPUT" ]
  1267  
  1268  
  1269  # # Read JMX metrics through Jolokia
  1270  # [[inputs.jolokia]]
  1271  #   ## This is the context root used to compose the jolokia url
  1272  #   ## NOTE that Jolokia requires a trailing slash at the end of the context root
  1273  #   ## NOTE that your jolokia security policy must allow for POST requests.
  1274  #   context = "/jolokia/"
  1275  #
  1276  #   ## This specifies the mode used
  1277  #   # mode = "proxy"
  1278  #   #
  1279  #   ## When in proxy mode this section is used to specify further
  1280  #   ## proxy address configurations.
  1281  #   ## Remember to change host address to fit your environment.
  1282  #   # [inputs.jolokia.proxy]
  1283  #   #   host = "127.0.0.1"
  1284  #   #   port = "8008"
  1285  #
  1286  #   ## Optional http timeouts
  1287  #   ##
  1288  #   ## response_header_timeout, if non-zero, specifies the amount of time to wait
  1289  #   ## for a server's response headers after fully writing the request.
  1290  #   # response_header_timeout = "3s"
  1291  #   ##
  1292  #   ## client_timeout specifies a time limit for requests made by this client.
  1293  #   ## Includes connection time, any redirects, and reading the response body.
  1294  #   # client_timeout = "4s"
  1295  #
  1296  #   ## Attribute delimiter
  1297  #   ##
  1298  #   ## When multiple attributes are returned for a single
  1299  #   ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
  1300  #   ## name, and the attribute name, separated by the given delimiter.
  1301  #   # delimiter = "_"
  1302  #
  1303  #   ## List of servers exposing jolokia read service
  1304  #   [[inputs.jolokia.servers]]
  1305  #     name = "as-server-01"
  1306  #     host = "127.0.0.1"
  1307  #     port = "8008"
  1308  #     # username = "myuser"
  1309  #     # password = "mypassword"
  1310  #
  1311  #   ## List of metrics collected on the servers above
  1312  #   ## Each metric consists of a name, a jmx path, and either
  1313  #   ## a pass or drop slice attribute.
  1314  #   ## This collects all heap memory usage metrics.
  1315  #   [[inputs.jolokia.metrics]]
  1316  #     name = "heap_memory_usage"
  1317  #     mbean  = "java.lang:type=Memory"
  1318  #     attribute = "HeapMemoryUsage"
  1319  #
  1320  #   ## This collects thread count metrics.
  1321  #   [[inputs.jolokia.metrics]]
  1322  #     name = "thread_count"
  1323  #     mbean  = "java.lang:type=Threading"
  1324  #     attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
  1325  #
  1326  #   ## This collects loaded/unloaded class count metrics.
  1327  #   [[inputs.jolokia.metrics]]
  1328  #     name = "class_count"
  1329  #     mbean  = "java.lang:type=ClassLoading"
  1330  #     attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
  1331  
  1332  
  1333  # # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
  1334  # [[inputs.kapacitor]]
  1335  #   ## Multiple URLs from which to read Kapacitor-formatted JSON
  1336  #   ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
  1337  #   urls = [
  1338  #     "http://localhost:9092/kapacitor/v1/debug/vars"
  1339  #   ]
  1340  #
  1341  #   ## Time limit for http requests
  1342  #   timeout = "5s"
  1343  
  1344  
  1345  # # Get kernel statistics from /proc/vmstat
  1346  # [[inputs.kernel_vmstat]]
  1347  #   # no configuration
  1348  
  1349  
  1350  # # Read metrics from the kubernetes kubelet api
  1351  # [[inputs.kubernetes]]
  1352  #   ## URL for the kubelet
  1353  #   url = "http://1.1.1.1:10255"
  1354  #
  1355  #   ## Use bearer token for authorization
  1356  #   # bearer_token = /path/to/bearer/token
  1357  #
  1358  #   ## Optional SSL Config
  1359  #   # ssl_ca = /path/to/cafile
  1360  #   # ssl_cert = /path/to/certfile
  1361  #   # ssl_key = /path/to/keyfile
  1362  #   ## Use SSL but skip chain & host verification
  1363  #   # insecure_skip_verify = false
  1364  
  1365  
  1366  # # Read metrics from a LeoFS Server via SNMP
  1367  # [[inputs.leofs]]
  1368  #   ## An array of URIs to gather stats about LeoFS.
  1369  #   ## Specify an ip or hostname with port. ie 127.0.0.1:4020
  1370  #   servers = ["127.0.0.1:4021"]
  1371  
  1372  
  1373  # # Provides Linux sysctl fs metrics
  1374  # [[inputs.linux_sysctl_fs]]
  1375  #   # no configuration
  1376  
  1377  
  1378  # # Read metrics from local Lustre service on OST, MDS
  1379  # [[inputs.lustre2]]
  1380  #   ## An array of /proc globs to search for Lustre stats
  1381  #   ## If not specified, the default will work on Lustre 2.5.x
  1382  #   ##
  1383  #   # ost_procfiles = [
  1384  #   #   "/proc/fs/lustre/obdfilter/*/stats",
  1385  #   #   "/proc/fs/lustre/osd-ldiskfs/*/stats",
  1386  #   #   "/proc/fs/lustre/obdfilter/*/job_stats",
  1387  #   # ]
  1388  #   # mds_procfiles = [
  1389  #   #   "/proc/fs/lustre/mdt/*/md_stats",
  1390  #   #   "/proc/fs/lustre/mdt/*/job_stats",
  1391  #   # ]
  1392  
  1393  
  1394  # # Gathers metrics from the /3.0/reports MailChimp API
  1395  # [[inputs.mailchimp]]
  1396  #   ## MailChimp API key
  1397  #   ## get from https://admin.mailchimp.com/account/api/
  1398  #   api_key = "" # required
  1399  #   ## Reports for campaigns sent more than days_old ago will not be collected.
  1400  #   ## 0 means collect all.
  1401  #   days_old = 0
  1402  #   ## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old.
  1403  #   # campaign_id = ""
  1404  
  1405  
  1406  # # Read metrics from one or many memcached servers
  1407  # [[inputs.memcached]]
  1408  #   ## An array of addresses to gather stats about. Specify an ip or hostname
  1409  #   ## with optional port. ie localhost, 10.0.0.1:11211, etc.
  1410  #   servers = ["localhost:11211"]
  1411  #   # unix_sockets = ["/var/run/memcached.sock"]
  1412  
  1413  
  1414  # # Telegraf plugin for gathering metrics from N Mesos masters
  1415  # [[inputs.mesos]]
  1416  #   ## Timeout, in ms.
  1417  #   timeout = 100
  1418  #   ## A list of Mesos masters.
  1419  #   masters = ["localhost:5050"]
  1420  #   ## Master metric groups to be collected; by default, all are enabled.
  1421  #   master_collections = [
  1422  #     "resources",
  1423  #     "master",
  1424  #     "system",
  1425  #     "agents",
  1426  #     "frameworks",
  1427  #     "tasks",
  1428  #     "messages",
  1429  #     "evqueue",
  1430  #     "registrar",
  1431  #   ]
  1432  #   ## A list of Mesos slaves, default is []
  1433  #   # slaves = []
  1434  #   ## Slave metric groups to be collected; by default, all are enabled.
  1435  #   # slave_collections = [
  1436  #   #   "resources",
  1437  #   #   "agent",
  1438  #   #   "system",
  1439  #   #   "executors",
  1440  #   #   "tasks",
  1441  #   #   "messages",
  1442  #   # ]
  1443  
  1444  
  1445  # # Read metrics from one or many MongoDB servers
  1446  # [[inputs.mongodb]]
  1447  #   ## An array of URIs to gather stats about. Specify an ip or hostname
  1448  #   ## with optional port and password. ie,
  1449  #   ##   mongodb://user:auth_key@10.10.3.30:27017,
  1450  #   ##   mongodb://10.10.3.33:18832,
  1451  #   ##   10.0.0.1:10000, etc.
  1452  #   servers = ["127.0.0.1:27017"]
  1453  #   gather_perdb_stats = false
  1454  #
  1455  #   ## Optional SSL Config
  1456  #   # ssl_ca = "/etc/telegraf/ca.pem"
  1457  #   # ssl_cert = "/etc/telegraf/cert.pem"
  1458  #   # ssl_key = "/etc/telegraf/key.pem"
  1459  #   ## Use SSL but skip chain & host verification
  1460  #   # insecure_skip_verify = false
  1461  
  1462  
  1463  # # Read metrics from one or many mysql servers
  1464  # [[inputs.mysql]]
  1465  #   ## specify servers via a url matching:
  1466  #   ##  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
  1467  #   ##  see https://github.com/go-sql-driver/mysql#dsn-data-source-name
  1468  #   ##  e.g.
  1469  #   ##    servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
  1470  #   ##    servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
  1471  #   #
  1472  #   ## If no servers are specified, then localhost is used as the host.
  1473  #   servers = ["tcp(127.0.0.1:3306)/"]
  1474  #   ## the limits for metrics from perf_events_statements
  1475  #   perf_events_statements_digest_text_limit  = 120
  1476  #   perf_events_statements_limit              = 250
  1477  #   perf_events_statements_time_limit         = 86400
  1478  #   #
  1479  #   ## if the list is empty, then metrics are gathered from all database tables
  1480  #   table_schema_databases                    = []
  1481  #   #
  1482  #   ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases listed above
  1483  #   gather_table_schema                       = false
  1484  #   #
  1485  #   ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
  1486  #   gather_process_list                       = true
  1487  #   #
  1488  #   ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
  1489  #   gather_user_statistics                    = true
  1490  #   #
  1491  #   ## gather auto_increment columns and max values from information schema
  1492  #   gather_info_schema_auto_inc               = true
  1493  #   #
  1494  #   ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
  1495  #   gather_innodb_metrics                     = true
  1496  #   #
  1497  #   ## gather metrics from SHOW SLAVE STATUS command output
  1498  #   gather_slave_status                       = true
  1499  #   #
  1500  #   ## gather metrics from SHOW BINARY LOGS command output
  1501  #   gather_binary_logs                        = false
  1502  #   #
  1503  #   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
  1504  #   gather_table_io_waits                     = false
  1505  #   #
  1506  #   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
  1507  #   gather_table_lock_waits                   = false
  1508  #   #
  1509  #   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
  1510  #   gather_index_io_waits                     = false
  1511  #   #
  1512  #   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
  1513  #   gather_event_waits                        = false
  1514  #   #
  1515  #   ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
  1516  #   gather_file_events_stats                  = false
  1517  #   #
  1518  #   ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
  1519  #   gather_perf_events_statements             = false
  1520  #   #
  1521  #   ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
  1522  #   interval_slow                   = "30m"
  1523  
  1524  
  1525  # # Read metrics about network interface usage
  1526  [[inputs.net]]
  1527    ## By default, telegraf gathers stats from any up interface (excluding loopback)
  1528    ## Setting interfaces will tell it to gather these explicit interfaces,
  1529    ## regardless of status.
  1530    ##
  1531    # interfaces = ["eth0"]
  1532  
  1533  
  1534  # # TCP or UDP 'ping' a given url and collect the response time in seconds
  1535  # [[inputs.net_response]]
  1536  #   ## Protocol, must be "tcp" or "udp"
  1537  #   ## NOTE: because the "udp" protocol is connectionless, a udp check requires
  1538  #   ## a send/expect string pair (see below).
  1539  #   protocol = "tcp"
  1540  #   ## Server address (default localhost)
  1541  #   address = "localhost:80"
  1542  #   ## Set timeout
  1543  #   timeout = "1s"
  1544  #
  1545  #   ## Set read timeout (only used if expecting a response)
  1546  #   read_timeout = "1s"
  1547  #
  1548  #   ## The following options are required for UDP checks. For TCP, they are
  1549  #   ## optional. The plugin will send the given string to the server and then
  1550  #   ## expect to receive the given 'expect' string back.
  1551  #   ## string sent to the server
  1552  #   # send = "ssh"
  1553  #   ## expected string in answer
  1554  #   # expect = "ssh"
  1555  
  1556  
  1557  # # Read TCP metrics such as established, time wait, and socket counts.
  1558  # [[inputs.netstat]]
  1559  #   # no configuration
  1560  
  1561  
  1562  # # Read Nginx's basic status information (ngx_http_stub_status_module)
  1563  # [[inputs.nginx]]
  1564  #   ## An array of Nginx stub_status URI to gather stats.
  1565  #   urls = ["http://localhost/status"]
  1566  
  1567  
  1568  # # Read NSQ topic and channel statistics.
  1569  # [[inputs.nsq]]
  1570  #   ## An array of NSQD HTTP API endpoints
  1571  #   endpoints = ["http://localhost:4151"]
  1572  
  1573  
  1574  # # Collect kernel snmp counters and network interface statistics
  1575  # [[inputs.nstat]]
  1576  #   ## file paths for proc files. If empty, default paths will be used:
  1577  #   ##    /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
  1578  #   ## These can also be overridden with env variables, see README.
  1579  #   proc_net_netstat = "/proc/net/netstat"
  1580  #   proc_net_snmp = "/proc/net/snmp"
  1581  #   proc_net_snmp6 = "/proc/net/snmp6"
  1582  #   ## dump metrics with 0 values too
  1583  #   dump_zeros       = true
  1584  
  1585  
  1586  # # Get standard NTP query metrics, requires ntpq executable.
  1587  # [[inputs.ntpq]]
  1588  #   ## If false, set the -n ntpq flag. Can reduce metric gather time.
  1589  #   dns_lookup = true
  1590  
  1591  
  1592  # # Read metrics of passenger using passenger-status
  1593  # [[inputs.passenger]]
  1594  #   ## Path of passenger-status.
  1595  #   ##
  1596  #   ## The plugin gathers metrics by parsing the XML output of passenger-status
  1597  #   ## More information about the tool:
  1598  #   ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
  1599  #   ##
  1600  #   ## If no path is specified, then the plugin simply executes passenger-status,
  1601  #   ## hoping it can be found in your PATH
  1602  #   command = "passenger-status -v --show=xml"
  1603  
  1604  
  1605  # # Read metrics of phpfpm, via HTTP status page or socket
  1606  # [[inputs.phpfpm]]
  1607  #   ## An array of addresses to gather stats about. Specify an ip or hostname
  1608  #   ## with optional port and path
  1609  #   ##
  1610  #   ## Plugin can be configured in three modes (any one can be used):
  1611  #   ##   - http: the URL must start with http:// or https://, ie:
  1612  #   ##       "http://localhost/status"
  1613  #   ##       "http://192.168.130.1/status?full"
  1614  #   ##
  1615  #   ##   - unixsocket: path to fpm socket, ie:
  1616  #   ##       "/var/run/php5-fpm.sock"
  1617  #   ##      or using a custom fpm status path:
  1618  #   ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
  1619  #   ##
  1620  #   ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
  1621  #   ##       "fcgi://10.0.0.12:9000/status"
  1622  #   ##       "cgi://10.0.10.12:9001/status"
  1623  #   ##
  1624  #   ## Example of gathering from both a local socket and a remote host
  1625  #   ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
  1626  #   urls = ["http://localhost/status"]
  1627  
  1628  
  1629  # # Ping given url(s) and return statistics
  1630  # [[inputs.ping]]
  1631  #   ## NOTE: this plugin forks the ping command. You may need to set capabilities
  1632  #   ## via setcap cap_net_raw+p /bin/ping
  1633  #   #
  1634  #   ## List of urls to ping
  1635  #   urls = ["www.google.com"] # required
  1636  #   ## number of pings to send per collection (ping -c <COUNT>)
  1637  #   # count = 1
  1638  #   ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
  1639  #   # ping_interval = 1.0
  1640  #   ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
  1641  #   # timeout = 1.0
  1642  #   ## interface to send ping from (ping -I <INTERFACE>)
  1643  #   # interface = ""
  1644  
  1645  
  1646  # # Read metrics from one or many postgresql servers
  1647  # [[inputs.postgresql]]
  1648  #   ## specify address via a url matching:
  1649  #   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
  1650  #   ##       ?sslmode=[disable|verify-ca|verify-full]
  1651  #   ## or a simple string:
  1652  #   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  1653  #   ##
  1654  #   ## All connection parameters are optional.
  1655  #   ##
  1656  #   ## Without the dbname parameter, the driver will default to a database
  1657  #   ## with the same name as the user. This dbname is just for instantiating a
  1658  #   ## connection with the server and doesn't restrict the databases we are trying
  1659  #   ## to grab metrics for.
  1660  #   ##
  1661  #   address = "host=localhost user=postgres sslmode=disable"
  1662  #
  1663  #   ## A list of databases to explicitly ignore.  If not specified, metrics for all
  1664  #   ## databases are gathered.  Do NOT use with the 'databases' option.
  1665  #   # ignored_databases = ["postgres", "template0", "template1"]
  1666  #
  1667  #   ## A list of databases to pull metrics about. If not specified, metrics for all
  1668  #   ## databases are gathered.  Do NOT use with the 'ignored_databases' option.
  1669  #   # databases = ["app_production", "testing"]
  1670  
  1671  
  1672  # # Read metrics from one or many postgresql servers
  1673  # [[inputs.postgresql_extensible]]
  1674  #   ## specify address via a url matching:
  1675  #   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
  1676  #   ##       ?sslmode=[disable|verify-ca|verify-full]
  1677  #   ## or a simple string:
  1678  #   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  1679  #   #
  1680  #   ## All connection parameters are optional.
  1681  #   ## Without the dbname parameter, the driver will default to a database
  1682  #   ## with the same name as the user. This dbname is just for instantiating a
  1683  #   ## connection with the server and doesn't restrict the databases we are trying
  1684  #   ## to grab metrics for.
  1685  #   #
  1686  #   address = "host=localhost user=postgres sslmode=disable"
  1687  #   ## A list of databases to pull metrics about. If not specified, metrics for all
  1688  #   ## databases are gathered.
  1689  #   # databases = ["app_production", "testing"]
  1690  #   #
  1691  #   ## A custom name for the database that will be used as the "server" tag in the
  1692  #   ## measurement output. If not specified, a default one generated from
  1693  #   ## the connection address is used.
  1694  #   # outputaddress = "db01"
  1695  #   #
  1696  #   ## Define the toml config where the sql queries are stored
  1697  #   ## New queries can be added. If withdbname is set to true and no databases
  1698  #   ## are defined in the 'databases' field, the sql query is ended by an
  1699  #   ## 'is not null' clause in order to make the query succeed.
  1700  #   ## Example :
  1701  #   ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
  1702  #   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
  1703  #   ## because the databases variable was set to ['postgres', 'pgbench'] and
  1704  #   ## withdbname was true. Note that if withdbname is set to false you must
  1705  #   ## not define the where clause (i.e. with the dbname); the tagvalue
  1706  #   ## field is used to define custom tags (separated by commas)
  1707  #   ## The optional "measurement" value can be used to override the default
  1708  #   ## output measurement name ("postgresql").
  1709  #   #
  1710  #   ## Structure :
  1711  #   ## [[inputs.postgresql_extensible.query]]
  1712  #   ##   sqlquery string
  1713  #   ##   version string
  1714  #   ##   withdbname boolean
  1715  #   ##   tagvalue string (comma separated)
  1716  #   ##   measurement string
  1717  #   [[inputs.postgresql_extensible.query]]
  1718  #     sqlquery="SELECT * FROM pg_stat_database"
  1719  #     version=901
  1720  #     withdbname=false
  1721  #     tagvalue=""
  1722  #     measurement=""
  1723  #   [[inputs.postgresql_extensible.query]]
  1724  #     sqlquery="SELECT * FROM pg_stat_bgwriter"
  1725  #     version=901
  1726  #     withdbname=false
  1727  #     tagvalue="postgresql.stats"
  1728  
  1729  
  1730  # # Read metrics from one or many PowerDNS servers
  1731  # [[inputs.powerdns]]
  1732  #   ## An array of sockets to gather stats about.
  1733  #   ## Specify a path to unix socket.
  1734  #   unix_sockets = ["/var/run/pdns.controlsocket"]
  1735  
  1736  
  1737  # # Monitor process cpu and memory usage
  1738  # [[inputs.procstat]]
  1739  #   ## Must specify one of: pid_file, exe, or pattern
  1740  #   ## PID file to monitor process
  1741  #   pid_file = "/var/run/nginx.pid"
  1742  #   ## executable name (ie, pgrep <exe>)
  1743  #   # exe = "nginx"
  1744  #   ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
  1745  #   # pattern = "nginx"
  1746  #   ## user as argument for pgrep (ie, pgrep -u <user>)
  1747  #   # user = "nginx"
  1748  #
  1749  #   ## override for process_name
  1750  #   ## This is optional; default is sourced from /proc/<pid>/status
  1751  #   # process_name = "bar"
  1752  #   ## Field name prefix
  1753  #   prefix = ""
  1754  #   ## comment this out if you want raw cpu_time stats
  1755  #   fielddrop = ["cpu_time_*"]
  1756  #   ## This is optional; moves pid into a tag instead of a field
  1757  #   pid_tag = false
  1758  
  1759  
  1760  # # Read metrics from one or many prometheus clients
  1761  # [[inputs.prometheus]]
  1762  #   ## An array of urls to scrape metrics from.
  1763  #   urls = ["http://localhost:9100/metrics"]
  1764  #
  1765  #   ## Use bearer token for authorization
  1766  #   # bearer_token = /path/to/bearer/token
  1767  #
  1768  #   ## Specify timeout duration for slower prometheus clients (default is 3s)
  1769  #   # response_timeout = "3s"
  1770  #
  1771  #   ## Optional SSL Config
  1772  #   # ssl_ca = /path/to/cafile
  1773  #   # ssl_cert = /path/to/certfile
  1774  #   # ssl_key = /path/to/keyfile
  1775  #   ## Use SSL but skip chain & host verification
  1776  #   # insecure_skip_verify = false
  1777  
  1778  
  1779  # # Reads the last_run_summary.yaml file and converts it to measurements
  1780  # [[inputs.puppetagent]]
  1781  #   ## Location of puppet last run summary file
  1782  #   location = "/var/lib/puppet/state/last_run_summary.yaml"
  1783  
  1784  
  1785  # # Read metrics from one or many RabbitMQ servers via the management API
  1786  # [[inputs.rabbitmq]]
  1787  #   # url = "http://localhost:15672"
  1788  #   # name = "rmq-server-1" # optional tag
  1789  #   # username = "guest"
  1790  #   # password = "guest"
  1791  #
  1792  #   ## Optional SSL Config
  1793  #   # ssl_ca = "/etc/telegraf/ca.pem"
  1794  #   # ssl_cert = "/etc/telegraf/cert.pem"
  1795  #   # ssl_key = "/etc/telegraf/key.pem"
  1796  #   ## Use SSL but skip chain & host verification
  1797  #   # insecure_skip_verify = false
  1798  #
  1799  #   ## Optional request timeouts
  1800  #   ##
  1801  #   ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
  1802  #   ## for a server's response headers after fully writing the request.
  1803  #   # header_timeout = "3s"
  1804  #   ##
  1805  #   ## client_timeout specifies a time limit for requests made by this client.
  1806  #   ## Includes connection time, any redirects, and reading the response body.
  1807  #   # client_timeout = "4s"
  1808  #
  1809  #   ## A list of nodes to pull metrics about. If not specified, metrics for
  1810  #   ## all nodes are gathered.
  1811  #   # nodes = ["rabbit@node1", "rabbit@node2"]
  1812  
  1813  
  1814  # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
  1815  # [[inputs.raindrops]]
  1816  #   ## An array of raindrops middleware URI to gather stats.
  1817  #   urls = ["http://localhost:8008/_raindrops"]
  1818  
  1819  
  1820  # # Read metrics from one or many redis servers
  1821  # [[inputs.redis]]
  1822  #   ## specify servers via a url matching:
  1823  #   ##  [protocol://][:password]@address[:port]
  1824  #   ##  e.g.
  1825  #   ##    tcp://localhost:6379
  1826  #   ##    tcp://:password@192.168.99.100
  1827  #   ##    unix:///var/run/redis.sock
  1828  #   ##
  1829  #   ## If no servers are specified, then localhost is used as the host.
  1830  #   ## If no port is specified, 6379 is used
  1831  #   servers = ["tcp://localhost:6379"]
  1832  
  1833  
  1834  # # Read metrics from one or many RethinkDB servers
  1835  # [[inputs.rethinkdb]]
  1836  #   ## An array of URIs to gather stats about. Specify an ip or hostname
  1837  #   ## with optional port and password. ie,
  1838  #   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
  1839  #   ##   rethinkdb://10.10.3.33:18832,
  1840  #   ##   10.0.0.1:10000, etc.
  1841  #   servers = ["127.0.0.1:28015"]
  1842  
  1843  
  1844  # # Read metrics from one or many Riak servers
  1845  # [[inputs.riak]]
  1846  #   # Specify a list of one or more riak http servers
  1847  #   servers = ["http://localhost:8098"]
  1848  
  1849  
  1850  # # Monitor sensors, requires lm-sensors package
  1851  # [[inputs.sensors]]
  1852  #   ## Remove numbers from field names.
  1853  #   ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
  1854  #   # remove_numbers = true
  1855  
  1856  
  1857  # # Retrieves SNMP values from remote agents
  1858  # [[inputs.snmp]]
  1859  #   agents = [ "127.0.0.1:161" ]
  1860  #   ## Timeout for each SNMP query.
  1861  #   timeout = "5s"
  1862  #   ## Number of retries to attempt within timeout.
  1863  #   retries = 3
  1864  #   ## SNMP version, values can be 1, 2, or 3
  1865  #   version = 2
  1866  #
  1867  #   ## SNMP community string.
  1868  #   community = "public"
  1869  #
  1870  #   ## The GETBULK max-repetitions parameter
  1871  #   max_repetitions = 10
  1872  #
  1873  #   ## SNMPv3 auth parameters
  1874  #   #sec_name = "myuser"
  1875  #   #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
  1876  #   #auth_password = "pass"
  1877  #   #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
  1878  #   #context_name = ""
  1879  #   #priv_protocol = ""         # Values: "DES", "AES", ""
  1880  #   #priv_password = ""
  1881  #
  1882  #   ## measurement name
  1883  #   name = "system"
  1884  #   [[inputs.snmp.field]]
  1885  #     name = "hostname"
  1886  #     oid = ".1.0.0.1.1"
  1887  #   [[inputs.snmp.field]]
  1888  #     name = "uptime"
  1889  #     oid = ".1.0.0.1.2"
  1890  #   [[inputs.snmp.field]]
  1891  #     name = "load"
  1892  #     oid = ".1.0.0.1.3"
  1893  #   [[inputs.snmp.field]]
  1894  #     oid = "HOST-RESOURCES-MIB::hrMemorySize"
  1895  #
  1896  #   [[inputs.snmp.table]]
  1897  #     ## measurement name
  1898  #     name = "remote_servers"
  1899  #     inherit_tags = [ "hostname" ]
  1900  #     [[inputs.snmp.table.field]]
  1901  #       name = "server"
  1902  #       oid = ".1.0.0.0.1.0"
  1903  #       is_tag = true
  1904  #     [[inputs.snmp.table.field]]
  1905  #       name = "connections"
  1906  #       oid = ".1.0.0.0.1.1"
  1907  #     [[inputs.snmp.table.field]]
  1908  #       name = "latency"
  1909  #       oid = ".1.0.0.0.1.2"
  1910  #
  1911  #   [[inputs.snmp.table]]
  1912  #     ## auto populate table's fields using the MIB
  1913  #     oid = "HOST-RESOURCES-MIB::hrNetworkTable"
  1914  
  1915  
  1916  # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
  1917  # [[inputs.snmp_legacy]]
  1918  #   ## Use 'oids.txt' file to translate oids to names
  1919  #   ## To generate 'oids.txt' you need to run:
  1920  #   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  1921  #   ## Or if you have another MIB folder with custom MIBs
  1922  #   ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  1923  #   snmptranslate_file = "/tmp/oids.txt"
  1924  #   [[inputs.snmp.host]]
  1925  #     address = "192.168.2.2:161"
  1926  #     # SNMP community
  1927  #     community = "public" # default public
  1928  #     # SNMP version (1, 2 or 3)
  1929  #     # Version 3 not supported yet
  1930  #     version = 2 # default 2
  1931  #     # SNMP response timeout
  1932  #     timeout = 2.0 # default 2.0
  1933  #     # SNMP request retries
  1934  #     retries = 2 # default 2
  1935  #     # Which get/bulk do you want to collect for this host
  1936  #     collect = ["mybulk", "sysservices", "sysdescr"]
  1937  #     # Simple list of OIDs to get, in addition to "collect"
  1938  #     get_oids = []
  1939  #
  1940  #   [[inputs.snmp.host]]
  1941  #     address = "192.168.2.3:161"
  1942  #     community = "public"
  1943  #     version = 2
  1944  #     timeout = 2.0
  1945  #     retries = 2
  1946  #     collect = ["mybulk"]
  1947  #     get_oids = [
  1948  #         "ifNumber",
  1949  #         ".1.3.6.1.2.1.1.3.0",
  1950  #     ]
  1951  #
  1952  #   [[inputs.snmp.get]]
  1953  #     name = "ifnumber"
  1954  #     oid = "ifNumber"
  1955  #
  1956  #   [[inputs.snmp.get]]
  1957  #     name = "interface_speed"
  1958  #     oid = "ifSpeed"
  1959  #     instance = "0"
  1960  #
  1961  #   [[inputs.snmp.get]]
  1962  #     name = "sysuptime"
  1963  #     oid = ".1.3.6.1.2.1.1.3.0"
  1964  #     unit = "second"
  1965  #
  1966  #   [[inputs.snmp.bulk]]
  1967  #     name = "mybulk"
  1968  #     max_repetition = 127
  1969  #     oid = ".1.3.6.1.2.1.1"
  1970  #
  1971  #   [[inputs.snmp.bulk]]
  1972  #     name = "ifoutoctets"
  1973  #     max_repetition = 127
  1974  #     oid = "ifOutOctets"
  1975  #
  1976  #   [[inputs.snmp.host]]
  1977  #     address = "192.168.2.13:161"
  1978  #     #address = "127.0.0.1:161"
  1979  #     community = "public"
  1980  #     version = 2
  1981  #     timeout = 2.0
  1982  #     retries = 2
  1983  #     #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
  1984  #     collect = ["sysuptime" ]
  1985  #     [[inputs.snmp.host.table]]
  1986  #       name = "iftable3"
  1987  #       include_instances = ["enp5s0", "eth1"]
  1988  #
  1989  #   # SNMP TABLEs
  1990  #   # table with neither mapping nor subtables
  1991  #   [[inputs.snmp.table]]
  1992  #     name = "iftable1"
  1993  #     oid = ".1.3.6.1.2.1.31.1.1.1"
  1994  #
  1995  #   # table without mapping but with subtables
  1996  #   [[inputs.snmp.table]]
  1997  #     name = "iftable2"
  1998  #     oid = ".1.3.6.1.2.1.31.1.1.1"
  1999  #     sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
  2000  #
  2001  #   # table with mapping but without subtables
  2002  #   [[inputs.snmp.table]]
  2003  #     name = "iftable3"
  2004  #     oid = ".1.3.6.1.2.1.31.1.1.1"
  2005  #     # if empty, get all instances
  2006  #     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
  2007  #     # if empty, get all subtables
  2008  #
  2009  #   # table with both mapping and subtables
  2010  #   [[inputs.snmp.table]]
  2011  #     name = "iftable4"
  2012  #     oid = ".1.3.6.1.2.1.31.1.1.1"
  2013  #     # if empty, get all instances
  2014  #     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
  2015  #     # if empty, get all subtables
  2016  #     # sub_tables need not be "real" subtables
  2017  #     sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
  2018  
  2019  
  2020  # # Read metrics from Microsoft SQL Server
  2021  # [[inputs.sqlserver]]
  2022  #   ## Specify instances to monitor with a list of connection strings.
  2023  #   ## All connection parameters are optional.
  2024  #   ## By default, the host is localhost, listening on default port, TCP 1433.
  2025  #   ##   for Windows, the user is the currently running AD user (SSO).
  2026  #   ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
  2027  #   ##   parameters.
  2028  #   # servers = [
  2029  #   #  "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
  2030  #   # ]
  2031  
  2032  
  2033  # # Sysstat metrics collector
  2034  # [[inputs.sysstat]]
  2035  #   ## Path to the sadc command.
  2036  #   #
  2037  #   ## Common Defaults:
  2038  #   ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
  2039  #   ##   Arch:          /usr/lib/sa/sadc
  2040  #   ##   RHEL/CentOS:   /usr/lib64/sa/sadc
  2041  #   sadc_path = "/usr/lib/sa/sadc" # required
  2042  #   #
  2043  #   #
  2044  #   ## Path to the sadf command, if it is not in PATH
  2045  #   # sadf_path = "/usr/bin/sadf"
  2046  #   #
  2047  #   #
  2048  #   ## Activities is a list of activities that are passed as arguments to the
  2049  #   ## sadc collector utility (e.g. DISK, SNMP, etc.)
  2050  #   ## The more activities that are added, the more data is collected.
  2051  #   # activities = ["DISK"]
  2052  #   #
  2053  #   #
  2054  #   ## Group metrics to measurements.
  2055  #   ##
  2056  #   ## If group is false, each metric will be prefixed with a description
  2057  #   ## and represents a measurement by itself.
  2058  #   ##
  2059  #   ## If group is true, corresponding metrics are grouped into a single measurement.
  2060  #   # group = true
  2061  #   #
  2062  #   #
  2063  #   ## Options for the sadf command. The values on the left represent the sadf
  2064  #   ## options and the values on the right their description (which are used for
  2065  #   ## grouping and prefixing metrics).
  2066  #   ##
  2067  #   ## Run 'sar -h' or 'man sar' to find out the supported options for your
  2068  #   ## sysstat version.
  2069  #   [inputs.sysstat.options]
  2070  #     -C = "cpu"
  2071  #     -B = "paging"
  2072  #     -b = "io"
  2073  #     -d = "disk"             # requires DISK activity
  2074  #     "-n ALL" = "network"
  2075  #     "-P ALL" = "per_cpu"
  2076  #     -q = "queue"
  2077  #     -R = "mem"
  2078  #     -r = "mem_util"
  2079  #     -S = "swap_util"
  2080  #     -u = "cpu_util"
  2081  #     -v = "inode"
  2082  #     -W = "swap"
  2083  #     -w = "task"
  2084  #   #  -H = "hugepages"        # only available for newer linux distributions
  2085  #   #  "-I ALL" = "interrupts" # requires INT activity
  2086  #   #
  2087  #   #
  2088  #   ## Device tags can be used to add additional tags for devices.
  2089  #   ## For example the configuration below adds a tag vg with value rootvg for
  2090  #   ## all metrics with sda devices.
  2091  #   # [[inputs.sysstat.device_tags.sda]]
  2092  #   #  vg = "rootvg"
  2093  
  2094  
  2095  # # Inserts sine and cosine waves for demonstration purposes
  2096  # [[inputs.trig]]
  2097  #   ## Set the amplitude
  2098  #   amplitude = 10.0
  2099  
  2100  
  2101  # # Read Twemproxy stats data
  2102  # [[inputs.twemproxy]]
  2103  #   ## Twemproxy stats address and port (no scheme)
  2104  #   addr = "localhost:22222"
  2105  #   ## Monitor pool name
  2106  #   pools = ["redis_pool", "mc_pool"]
  2107  
  2108  
  2109  # # A plugin to collect stats from Varnish HTTP Cache
  2110  # [[inputs.varnish]]
  2111  #   ## The default location of the varnishstat binary can be overridden with:
  2112  #   binary = "/usr/bin/varnishstat"
  2113  #
  2114  #   ## By default, telegraf gathers stats for 3 metric points.
  2115  #   ## Setting stats will override the defaults shown below.
  2116  #   ## Glob matching can be used, ie, stats = ["MAIN.*"]
  2117  #   ## stats may also be set to ["*"], which will collect all stats
  2118  #   stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
  2119  
  2120  
  2121  # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
  2122  # [[inputs.zfs]]
  2123  #   ## ZFS kstat path. Ignored on FreeBSD
  2124  #   ## If not specified, then default is:
  2125  #   # kstatPath = "/proc/spl/kstat/zfs"
  2126  #
  2127  #   ## By default, telegraf gathers all zfs stats
  2128  #   ## If not specified, then default is:
  2129  #   # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
  2130  #
  2131  #   ## By default, don't gather zpool stats
  2132  #   # poolMetrics = false
  2133  
  2134  
  2135  # # Reads 'mntr' stats from one or many zookeeper servers
  2136  # [[inputs.zookeeper]]
  2137  #   ## An array of addresses to gather stats about. Specify an ip or hostname
  2138  #   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
  2139  #
  2140  #   ## If no servers are specified, then localhost is used as the host.
  2141  #   ## If no port is specified, 2181 is used
  2142  #   servers = [":2181"]
  2143  
  2144  
  2145  
  2146  ###############################################################################
  2147  #                            SERVICE INPUT PLUGINS                            #
  2148  ###############################################################################
  2149  
  2150  # # AMQP consumer plugin
  2151  # [[inputs.amqp_consumer]]
  2152  #   ## AMQP url
  2153  #   url = "amqp://localhost:5672/influxdb"
  2154  #   ## AMQP exchange
  2155  #   exchange = "telegraf"
  2156  #   ## AMQP queue name
  2157  #   queue = "telegraf"
  2158  #   ## Binding Key
  2159  #   binding_key = "#"
  2160  #
  2161  #   ## Maximum number of messages server should give to the worker.
  2162  #   prefetch_count = 50
  2163  #
  2164  #   ## Auth method. PLAIN and EXTERNAL are supported
  2165  #   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  2166  #   ## described here: https://www.rabbitmq.com/plugins.html
  2167  #   # auth_method = "PLAIN"
  2168  #
  2169  #   ## Optional SSL Config
  2170  #   # ssl_ca = "/etc/telegraf/ca.pem"
  2171  #   # ssl_cert = "/etc/telegraf/cert.pem"
  2172  #   # ssl_key = "/etc/telegraf/key.pem"
  2173  #   ## Use SSL but skip chain & host verification
  2174  #   # insecure_skip_verify = false
  2175  #
  2176  #   ## Data format to consume.
  2177  #   ## Each data format has its own unique set of configuration options, read
  2178  #   ## more about them here:
  2179  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2180  #   data_format = "influx"
  2181  
  2182  
  2183  # # Influx HTTP write listener
  2184  # [[inputs.http_listener]]
  2185  #   ## Address and port to host HTTP listener on
  2186  #   service_address = ":8186"
  2187  #
  2188  #   ## maximum duration before timing out read of the request
  2189  #   read_timeout = "10s"
  2190  #   ## maximum duration before timing out write of the response
  2191  #   write_timeout = "10s"
  2192  #
  2193  #   ## Maximum allowed http request body size in bytes.
  2194  #   ## 0 means to use the default of 536,870,912 bytes (512 mebibytes)
  2195  #   max_body_size = 0
  2196  #
  2197  #   ## Maximum line size allowed to be sent in bytes.
  2198  #   ## 0 means to use the default of 65536 bytes (64 kibibytes)
  2199  #   max_line_size = 0
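        #   ## The listener accepts InfluxDB-style writes, so (for example) a client
        #   ## could POST line protocol to http://localhost:8186/write.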
  2200  
  2201  
  2202  # # Read metrics from Kafka topic(s)
  2203  # [[inputs.kafka_consumer]]
  2204  #   ## topic(s) to consume
  2205  #   topics = ["telegraf"]
  2206  #   ## an array of Zookeeper connection strings
  2207  #   zookeeper_peers = ["localhost:2181"]
  2208  #   ## Zookeeper Chroot
  2209  #   zookeeper_chroot = ""
  2210  #   ## the name of the consumer group
  2211  #   consumer_group = "telegraf_metrics_consumers"
  2212  #   ## Offset (must be either "oldest" or "newest")
  2213  #   offset = "oldest"
  2214  #
  2215  #   ## Data format to consume.
  2216  #   ## Each data format has its own unique set of configuration options, read
  2217  #   ## more about them here:
  2218  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2219  #   data_format = "influx"
  2220  #
  2221  #   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  2222  #   ## larger messages are dropped
  2223  #   max_message_len = 65536
  2224  
  2225  
  2226  # # Stream and parse log file(s).
  2227  # [[inputs.logparser]]
  2228  #   ## Log files to parse.
  2229  #   ## These accept standard unix glob matching rules, but with the addition of
  2230  #   ## ** as a "super asterisk". ie:
  2231  #   ##   /var/log/**.log     -> recursively find all .log files in /var/log
  2232  #   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
  2233  #   ##   /var/log/apache.log -> only tail the apache log file
  2234  #   files = ["/var/log/apache/access.log"]
  2235  #   ## Read files that currently exist from the beginning. Files that are created
  2236  #   ## while telegraf is running (and that match the "files" globs) will always
  2237  #   ## be read from the beginning.
  2238  #   from_beginning = false
  2239  #
  2240  #   ## Parse logstash-style "grok" patterns:
  2241  #   ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
  2242  #   [inputs.logparser.grok]
  2243  #     ## This is a list of patterns to check the given log file(s) for.
  2244  #     ## Note that adding patterns here increases processing time. The most
  2245  #     ## efficient configuration is to have one pattern per logparser.
  2246  #     ## Other common built-in patterns are:
  2247  #     ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
  2248  #     ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
  2249  #     patterns = ["%{COMBINED_LOG_FORMAT}"]
  2250  #     ## Name of the output measurement.
  2251  #     measurement = "apache_access_log"
  2252  #     ## Full path(s) to custom pattern files.
  2253  #     custom_pattern_files = []
  2254  #     ## Custom patterns can also be defined here. Put one pattern per line.
  2255  #     custom_patterns = '''
  2256  #     '''
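        #     ## e.g. a custom pattern is a name/regex pair on one line, such as
        #     ## this hypothetical:
        #     ##   POSTFIX_QUEUEID [0-9A-F]{10,11}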
  2257  
  2258  
  2259  # # Read metrics from MQTT topic(s)
  2260  # [[inputs.mqtt_consumer]]
  2261  #   servers = ["localhost:1883"]
  2262  #   ## MQTT QoS, must be 0, 1, or 2
  2263  #   qos = 0
  2264  #
  2265  #   ## Topics to subscribe to
  2266  #   topics = [
  2267  #     "telegraf/host01/cpu",
  2268  #     "telegraf/+/mem",
  2269  #     "sensors/#",
  2270  #   ]
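        #   ## ("+" matches a single topic level, "#" matches any number of levels)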
  2271  #
  2272  #   # if true, messages that can't be delivered while the subscriber is offline
  2273  #   # will be delivered when it comes back (such as on service restart).
  2274  #   # NOTE: if true, client_id MUST be set
  2275  #   persistent_session = false
  2276  #   # If empty, a random client ID will be generated.
  2277  #   client_id = ""
  2278  #
  2279  #   ## username and password to connect to the MQTT server.
  2280  #   # username = "telegraf"
  2281  #   # password = "metricsmetricsmetricsmetrics"
  2282  #
  2283  #   ## Optional SSL Config
  2284  #   # ssl_ca = "/etc/telegraf/ca.pem"
  2285  #   # ssl_cert = "/etc/telegraf/cert.pem"
  2286  #   # ssl_key = "/etc/telegraf/key.pem"
  2287  #   ## Use SSL but skip chain & host verification
  2288  #   # insecure_skip_verify = false
  2289  #
  2290  #   ## Data format to consume.
  2291  #   ## Each data format has its own unique set of configuration options, read
  2292  #   ## more about them here:
  2293  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2294  #   data_format = "influx"
  2295  
  2296  
  2297  # # Read metrics from NATS subject(s)
  2298  # [[inputs.nats_consumer]]
  2299  #   ## urls of NATS servers
  2300  #   # servers = ["nats://localhost:4222"]
  2301  #   ## Use Transport Layer Security
  2302  #   # secure = false
  2303  #   ## subject(s) to consume
  2304  #   # subjects = ["telegraf"]
  2305  #   ## name a queue group
  2306  #   # queue_group = "telegraf_consumers"
  2307  #
  2308  #   ## Sets the limits for pending msgs and bytes for each subscription
  2309  #   ## These shouldn't need to be adjusted except in very high throughput scenarios
  2310  #   # pending_message_limit = 65536
  2311  #   # pending_bytes_limit = 67108864
  2312  #
  2313  #   ## Data format to consume.
  2314  #   ## Each data format has its own unique set of configuration options, read
  2315  #   ## more about them here:
  2316  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2317  #   data_format = "influx"
  2318  
  2319  
  2320  # # Read NSQ topic for metrics.
  2321  # [[inputs.nsq_consumer]]
  2322  #   ## A string representing the NSQD TCP endpoint
  2323  #   server = "localhost:4150"
  2324  #   topic = "telegraf"
  2325  #   channel = "consumer"
  2326  #   max_in_flight = 100
  2327  #
  2328  #   ## Data format to consume.
  2329  #   ## Each data format has its own unique set of configuration options, read
  2330  #   ## more about them here:
  2331  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2332  #   data_format = "influx"
  2333  
  2334  
  2335  # # Generic socket listener capable of handling multiple socket types.
  2336  # [[inputs.socket_listener]]
  2337  #   ## URL to listen on
  2338  #   # service_address = "tcp://:8094"
  2339  #   # service_address = "tcp://127.0.0.1:http"
  2340  #   # service_address = "tcp4://:8094"
  2341  #   # service_address = "tcp6://:8094"
  2342  #   # service_address = "tcp6://[2001:db8::1]:8094"
  2343  #   # service_address = "udp://:8094"
  2344  #   # service_address = "udp4://:8094"
  2345  #   # service_address = "udp6://:8094"
  2346  #   # service_address = "unix:///tmp/telegraf.sock"
  2347  #   # service_address = "unixgram:///tmp/telegraf.sock"
  2348  #
  2349  #   ## Maximum number of concurrent connections.
  2350  #   ## Only applies to stream sockets (e.g. TCP).
  2351  #   ## 0 (default) is unlimited.
  2352  #   # max_connections = 1024
  2353  #
  2354  #   ## Maximum socket buffer size in bytes.
  2355  #   ## For stream sockets, once the buffer fills up, the sender will start backing up.
  2356  #   ## For datagram sockets, once the buffer fills up, metrics will start dropping.
  2357  #   ## Defaults to the OS default.
  2358  #   # read_buffer_size = 65535
  2359  #
  2360  #   ## Period between keep alive probes.
  2361  #   ## Only applies to TCP sockets.
  2362  #   ## 0 disables keep alive probes.
  2363  #   ## Defaults to the OS configuration.
  2364  #   # keep_alive_period = "5m"
  2365  #
  2366  #   ## Data format to consume.
  2367  #   ## Each data format has its own unique set of configuration options, read
  2368  #   ## more about them here:
  2369  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2370  #   # data_format = "influx"
  2371  
  2372  
  2373  # # Statsd Server
  2374  # [[inputs.statsd]]
  2375  #   ## Address and port to host UDP listener on
  2376  #   service_address = ":8125"
  2377  #
  2378  #   ## The following configuration options control when telegraf clears its cache
  2379  #   ## of previous values. If set to false, then telegraf will only clear its
  2380  #   ## cache when the daemon is restarted.
  2381  #   ## Reset gauges every interval (default=true)
  2382  #   delete_gauges = true
  2383  #   ## Reset counters every interval (default=true)
  2384  #   delete_counters = true
  2385  #   ## Reset sets every interval (default=true)
  2386  #   delete_sets = true
  2387  #   ## Reset timings & histograms every interval (default=true)
  2388  #   delete_timings = true
  2389  #
  2390  #   ## Percentiles to calculate for timing & histogram stats
  2391  #   percentiles = [90]
  2392  #
  2393  #   ## separator to use between elements of a statsd metric
  2394  #   metric_separator = "_"
  2395  #
  2396  #   ## Parses tags in the datadog statsd format
  2397  #   ## http://docs.datadoghq.com/guides/dogstatsd/
  2398  #   parse_data_dog_tags = false
  2399  #
  2400  #   ## Statsd data translation templates, more info can be read here:
  2401  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
  2402  #   # templates = [
  2403  #   #     "cpu.* measurement*"
  2404  #   # ]
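        #   ## e.g. (illustrative): with the template above and metric_separator "_",
        #   ## a bucket named "cpu.load.idle" would be stored as the measurement
        #   ## "cpu_load_idle".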
  2405  #
  2406  #   ## Number of UDP messages allowed to queue up, once filled,
  2407  #   ## the statsd server will start dropping packets
  2408  #   allowed_pending_messages = 10000
  2409  #
  2410  #   ## Number of timing/histogram values to track per-measurement in the
  2411  #   ## calculation of percentiles. Raising this limit increases the accuracy
  2412  #   ## of percentiles but also increases the memory usage and cpu time.
  2413  #   percentile_limit = 1000
  2414  
  2415  
  2416  # # Stream a log file, like the tail -f command
  2417  # [[inputs.tail]]
  2418  #   ## files to tail.
  2419  #   ## These accept standard unix glob matching rules, but with the addition of
  2420  #   ## ** as a "super asterisk". ie:
  2421  #   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
  2422  #   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
  2423  #   ##   "/var/log/apache.log" -> just tail the apache log file
  2424  #   ##
  2425  #   ## See https://github.com/gobwas/glob for more examples
  2426  #   ##
  2427  #   files = ["/var/mymetrics.out"]
  2428  #   ## Read file from beginning.
  2429  #   from_beginning = false
  2430  #   ## Whether file is a named pipe
  2431  #   pipe = false
  2432  #
  2433  #   ## Data format to consume.
  2434  #   ## Each data format has its own unique set of configuration options, read
  2435  #   ## more about them here:
  2436  #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2437  #   data_format = "influx"
  2438  
  2439  
  2440  # # Generic TCP listener
  2441  # [[inputs.tcp_listener]]
  2442  #   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
  2443  #   # socket_listener plugin
  2444  #   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
  2445  
  2446  
  2447  # # Generic UDP listener
  2448  # [[inputs.udp_listener]]
  2449  #   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
  2450  #   # socket_listener plugin
  2451  #   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
  2452  
  2453  
  2454  # # A Webhooks Event collector
  2455  # [[inputs.webhooks]]
  2456  #   ## Address and port to host Webhook listener on
  2457  #   service_address = ":1619"
  2458  #
  2459  #   [inputs.webhooks.filestack]
  2460  #     path = "/filestack"
  2461  #
  2462  #   [inputs.webhooks.github]
  2463  #     path = "/github"
  2464  #     # secret = ""
  2465  #
  2466  #   [inputs.webhooks.mandrill]
  2467  #     path = "/mandrill"
  2468  #
  2469  #   [inputs.webhooks.rollbar]
  2470  #     path = "/rollbar"
  2471  #
  2472  #   [inputs.webhooks.papertrail]
  2473  #     path = "/papertrail"
  2474