github.com/influxdata/influxdb/v2@v2.7.6/telegraf/plugins/inputs/inputs_test.go

package inputs

import (
	"errors"
	"reflect"
	"testing"

	"github.com/influxdata/influxdb/v2/telegraf/plugins"
)

// telegrafPluginConfig is a local interface that each telegraf input plugin
// in this package satisfies.
type telegrafPluginConfig interface {
	TOML() string
	Type() plugins.Type
	PluginName() string
	UnmarshalTOML(data interface{}) error
}

// TestType verifies that the base input reports the plugins.Input type.
func TestType(t *testing.T) {
	b := baseInput(0)
	if b.Type() != plugins.Input {
		t.Fatalf("input plugins type should be input, got %s", b.Type())
	}
}

// TestEncodeTOML verifies that each plugin's TOML() output matches the
// expected sample configuration, for both zero-value and populated plugins.
func TestEncodeTOML(t *testing.T) {
	cases := []struct {
		name    string
		plugins map[telegrafPluginConfig]string
	}{
		{
			name: "test empty plugins",
			plugins: map[telegrafPluginConfig]string{
				&CPUStats{}: `[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states
  report_active = false
`,
				&DiskStats{}: `[[inputs.disk]]
  ## By default stats will be gathered for all mount points.
  ## Set mount_points will restrict the stats to only the specified mount points.
  # mount_points = ["/"]
  ## Ignore mount points by filesystem type.
  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
`,
				&DiskIO{}: `[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb", "vd*"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the form of
  ## tags.
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
  ## Note: Most, but not all, udev properties can be accessed this way. Properties
  ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also customize the
  ## name of the device via templates.
  ## The 'name_templates' parameter is a list of templates to try and apply to
  ## the device. The template may contain variables in the form of '$PROPERTY' or
  ## '${PROPERTY}'. The first template which does not contain any variables not
  ## present for the device is used as the device name tag.
  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  ## the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
`,
				&Docker{}: `[[inputs.docker]]
  ## Docker Endpoint
  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = ""
  #
  ## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
  gather_services = false
  #
  ## Only collect metrics for these containers, collect all if empty
  container_names = []
  #
  ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
  source_tag = false
  #
  ## Containers to include and exclude. Globs accepted.
  ## Note that an empty array for both will include all containers
  container_name_include = []
  container_name_exclude = []
  #
  ## Container states to include and exclude. Globs accepted.
  ## When empty only containers in the "running" state will be captured.
  ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  # container_state_include = []
  # container_state_exclude = []
  #
  ## Timeout for docker list, info, and stats commands
  timeout = "5s"
  #
  ## Whether to report for each container per-device blkio (8:0, 8:1...),
  ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
  ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
  ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
  ## is honored.
  perdevice = true
  #
  ## Specifies for which classes a per-device metric should be issued
  ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
  ## Please note that this setting has no effect if 'perdevice' is set to 'true'
  # perdevice_include = ["cpu"]
  #
  ## Whether to report for each container total blkio and network stats or not.
  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
  ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
  ## is honored.
  total = false
  #
  ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values.
  ## Possible values are 'cpu', 'blkio' and 'network'
  ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
  ## Please note that this setting has no effect if 'total' is set to 'false'
  # total_include = ["cpu", "blkio", "network"]
  #
  ## Which environment variables should we use as a tag
  ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
  #
  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []
  #
  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`,
				&File{}: `[[inputs.file]]
  ## Files to parse each interval. Accept standard unix glob matching rules,
  ## as well as ** to match recursive files and directories.
  files = []

  ## Name a tag containing the name of the file the data was parsed from. Leave empty
  ## to disable.
  # file_tag = ""

  ## Character encoding to use when interpreting the file contents. Invalid
  ## characters are replaced using the unicode replacement character. When set
  ## to the empty string the data is not decoded to text.
  ## ex: character_encoding = "utf-8"
  ## character_encoding = "utf-16le"
  ## character_encoding = "utf-16be"
  ## character_encoding = ""
  # character_encoding = ""

  ## The dataformat to be read from files
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`,
				&Kernel{}: `[[inputs.kernel]]
  # no configuration
`,
				&Kubernetes{}: `[[inputs.kubernetes]]
  ## URL for the kubelet
  url = ""

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  ## If both of these are empty, we'll use the default serviceaccount:
  ## at: /run/secrets/kubernetes.io/serviceaccount/token
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## Pod labels to be added as tags. An empty array for both include and
  ## exclude will include all labels.
  # label_include = []
  # label_exclude = ["*"]

  ## Set response_timeout (default 5 seconds)
  # response_timeout = "5s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`,
				&LogParserPlugin{}: `[[inputs.logparser]]
  ## Log files to parse.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ## /var/log/**.log -> recursively find all .log files in /var/log
  ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
  ## /var/log/apache.log -> only tail the apache log file
  files = []

  ## Read files that currently exist from the beginning. Files that are created
  ## while telegraf is running (and that match the "files" globs) will always
  ## be read from the beginning.
  from_beginning = false

  ## Method used to watch for file updates. Can be either "inotify" or "poll".
  # watch_method = "inotify"

  ## Parse logstash-style "grok" patterns:
  [inputs.logparser.grok]
  ## This is a list of patterns to check the given log file(s) for.
  ## Note that adding patterns here increases processing time. The most
  ## efficient configuration is to have one pattern per logparser.
  ## Other common built-in patterns are:
  ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
  ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
  patterns = ["%{COMBINED_LOG_FORMAT}"]

  ## Name of the outputted measurement name.
  measurement = "apache_access_log"

  ## Full path(s) to custom pattern files.
  custom_pattern_files = []

  ## Custom patterns can also be defined here. Put one pattern per line.
  custom_patterns = '''
  '''

  ## Timezone allows you to provide an override for timestamps that
  ## don't already include an offset
  ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
  ##
  ## Default: "" which renders UTC
  ## Options are as follows:
  ## 1. Local -- interpret based on machine localtime
  ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
  ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
  # timezone = "Canada/Eastern"

  ## When set to "disable", timestamp will not incremented if there is a
  ## duplicate.
  # unique_timestamp = "auto"
`,
				&MemStats{}: `[[inputs.mem]]
  # no configuration
`,
				&NetIOStats{}: `[[inputs.net]]
  ## By default, telegraf gathers stats from any up interface (excluding loopback)
  ## Setting interfaces will tell it to gather these explicit interfaces,
  ## regardless of status.
  ##
  # interfaces = ["eth0"]
  ##
  ## On linux systems telegraf also collects protocol stats.
  ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
  ##
  # ignore_protocol_stats = false
  ##
`,
				&NetResponse{}: `[[inputs.net_response]]
  ## Protocol, must be "tcp" or "udp"
  ## NOTE: because the "udp" protocol does not respond to requests, it requires
  ## a send/expect string pair (see below).
  protocol = "tcp"
  ## Server address (default localhost)
  address = "localhost:80"

  ## Set timeout
  # timeout = "1s"

  ## Set read timeout (only used if expecting a response)
  # read_timeout = "1s"

  ## The following options are required for UDP checks. For TCP, they are
  ## optional. The plugin will send the given string to the server and then
  ## expect to receive the given 'expect' string back.
  ## string sent to the server
  # send = "ssh"
  ## expected string in answer
  # expect = "ssh"

  ## Uncomment to remove deprecated fields
  # fielddrop = ["result_type", "string_found"]
`,
				&Nginx{}: `[[inputs.nginx]]
  # An array of Nginx stub_status URI to gather stats.
  urls = []

  ## Optional TLS Config
  tls_ca = "/etc/telegraf/ca.pem"
  tls_cert = "/etc/telegraf/cert.cer"
  tls_key = "/etc/telegraf/key.key"
  ## Use TLS but skip chain & host verification
  insecure_skip_verify = false

  # HTTP response timeout (default: 5s)
  response_timeout = "5s"
`,
				&Processes{}: `[[inputs.processes]]
  # no configuration
`,
				&Procstat{}: `[[inputs.procstat]]
  ## PID file to monitor process
  pid_file = "/var/run/nginx.pid"
  ## executable name (ie, pgrep <exe>)
  # exe = ""
  ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
  # pattern = "nginx"
  ## user as argument for pgrep (ie, pgrep -u <user>)
  # user = "nginx"
  ## Systemd unit name
  # systemd_unit = "nginx.service"
  ## CGroup name or path
  # cgroup = "systemd/system.slice/nginx.service"

  ## Windows service name
  # win_service = ""

  ## override for process_name
  ## This is optional; default is sourced from /proc/<pid>/status
  # process_name = "bar"

  ## Field name prefix
  # prefix = ""

  ## When true add the full cmdline as a tag.
  # cmdline_tag = false

  ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
  # mode = "irix"

  ## Add the PID as a tag instead of as a field. When collecting multiple
  ## processes with otherwise matching tags this setting should be enabled to
  ## ensure each process has a unique identity.
  ##
  ## Enabling this option may result in a large number of series, especially
  ## when processes have a short lifetime.
  # pid_tag = false

  ## Method to use when finding process IDs. Can be one of 'pgrep', or
  ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
  ## the native finder performs the search directly in a manor dependent on the
  ## platform. Default is 'pgrep'
  # pid_finder = "pgrep"
`,
				&Prometheus{}: `[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = []

  ## Metric version controls the mapping from Prometheus metrics into
  ## Telegraf metrics. When using the prometheus_client output, use the same
  ## value in both plugins to ensure metrics are round-tripped without
  ## modification.
  ##
  ## example: metric_version = 1;
  ## metric_version = 2; recommended version
  # metric_version = 1

  ## Url tag name (tag containing scrapped url. optional, default is "url")
  # url_tag = "url"

  ## An array of Kubernetes services to scrape metrics from.
  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

  ## Kubernetes config file to create client from.
  # kube_config = "/path/to/kubernetes.config"

  ## Scrape Kubernetes pods for the following prometheus annotations:
  ## - prometheus.io/scrape: Enable scraping for this pod
  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  ## set this to 'https' & most likely set the tls config.
  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  ## - prometheus.io/port: If port is not 9102 use this annotation
  # monitor_kubernetes_pods = true
  ## Get the list of pods to scrape with either the scope of
  ## - cluster: the kubernetes watch api (default, no need to specify)
  ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
  # pod_scrape_scope = "cluster"
  ## Only for node scrape scope: node IP of the node that telegraf is running on.
  ## Either this config or the environment variable NODE_IP must be set.
  # node_ip = "10.180.1.1"
  # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
  # ## Default is 60 seconds.
  # # pod_scrape_interval = 60
  ## Restricts Kubernetes monitoring to a single namespace
  ## ex: monitor_kubernetes_pods_namespace = "default"
  # monitor_kubernetes_pods_namespace = ""
  # label selector to target pods which have the label
  # kubernetes_label_selector = "env=dev,app=nginx"
  # field selector to target pods
  # eg. To scrape pods on a specific node
  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## HTTP Basic Authentication username and password. ('bearer_token' and
  ## 'bearer_token_string' take priority)
  # username = ""
  # password = ""

  ## Specify timeout duration for slower prometheus clients (default is 3s)
  # response_timeout = "3s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`,
				&Redis{}: `[[inputs.redis]]
  ## specify servers via a url matching:
  ## [protocol://][:password]@address[:port]
  ## e.g.
  ## tcp://localhost:6379
  ## tcp://:password@192.168.99.100
  ## unix:///var/run/redis.sock
  ##
  ## If no servers are specified, then localhost is used as the host.
  ## If no port is specified, 6379 is used
  servers = []

  ## Optional. Specify redis commands to retrieve values
  # [[inputs.redis.commands]]
  # # The command to run where each argument is a separate element
  # command = ["get", "sample-key"]
  # # The field to store the result in
  # field = "sample-key-value"
  # # The type of the result
  # # Can be "string", "integer", or "float"
  # type = "string"

  ## specify server password
  # password = ""

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = true
`,
				&SwapStats{}: `[[inputs.swap]]
  # no configuration
`,
				&Syslog{}: `[[inputs.syslog]]
  ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
  ## Protocol, address and port to host the syslog receiver.
  ## If no host is specified, then localhost is used.
  ## If no port is specified, 6514 is used (RFC5425#section-4.1).
  server = ""

  ## TLS Config
  # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"

  ## Period between keep alive probes.
  ## 0 disables keep alive probes.
  ## Defaults to the OS configuration.
  ## Only applies to stream sockets (e.g. TCP).
  # keep_alive_period = "5m"

  ## Maximum number of concurrent connections (default = 0).
  ## 0 means unlimited.
  ## Only applies to stream sockets (e.g. TCP).
  # max_connections = 1024

  ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
  ## 0 means unlimited.
  # read_timeout = "5s"

  ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
  ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
  ## or the non-transparent framing technique (RFC6587#section-3.4.2).
  ## Must be one of "octet-counting", "non-transparent".
  # framing = "octet-counting"

  ## The trailer to be expected in case of non-transparent framing (default = "LF").
  ## Must be one of "LF", or "NUL".
  # trailer = "LF"

  ## Whether to parse in best effort mode or not (default = false).
  ## By default best effort parsing is off.
  # best_effort = false

  ## Character to prepend to SD-PARAMs (default = "_").
  ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
  ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
  ## For each combination a field is created.
  ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
  # sdparam_separator = "_"
`,
				&SystemStats{}: `[[inputs.system]]
  ## Uncomment to remove deprecated metrics.
  # fielddrop = ["uptime_format"]
`,
				&Tail{}: `[[inputs.tail]]
  ## File names or a pattern to tail.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ## "/var/log/**.log" -> recursively find all .log files in /var/log
  ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
  ## "/var/log/apache.log" -> just tail the apache log file
  ## "/var/log/log[!1-2]* -> tail files without 1-2
  ## "/var/log/log[^1-2]* -> identical behavior as above
  ## See https://github.com/gobwas/glob for more examples
  ##
  files = []

  ## Read file from beginning.
  # from_beginning = false

  ## Whether file is a named pipe
  # pipe = false

  ## Method used to watch for file updates. Can be either "inotify" or "poll".
  # watch_method = "inotify"

  ## Maximum lines of the file to process that have not yet be written by the
  ## output. For best throughput set based on the number of metrics on each
  ## line and the size of the output's metric_batch_size.
  # max_undelivered_lines = 1000

  ## Character encoding to use when interpreting the file contents. Invalid
  ## characters are replaced using the unicode replacement character. When set
  ## to the empty string the data is not decoded to text.
  ## ex: character_encoding = "utf-8"
  ## character_encoding = "utf-16le"
  ## character_encoding = "utf-16be"
  ## character_encoding = ""
  # character_encoding = ""

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

  ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
  # path_tag = "path"

  ## multiline parser/codec
  ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
  #[inputs.tail.multiline]
  ## The pattern should be a regexp which matches what you believe to be an
  ## indicator that the field is part of an event consisting of multiple lines of log data.
  #pattern = "^\s"

  ## This field must be either "previous" or "next".
  ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
  ## whereas "next" indicates that the line belongs to the next one.
  #match_which_line = "previous"

  ## The invert_match field can be true or false (defaults to false).
  ## If true, a message not matching the pattern will constitute a match of the multiline
  ## filter and the what will be applied. (vice-versa is also true)
  #invert_match = false

  ## After the specified timeout, this plugin sends a multiline event even if no new pattern
  ## is found to start a new event. The default timeout is 5s.
  #timeout = 5s
`,
			},
		},
		{
			name: "standard testing",
			plugins: map[telegrafPluginConfig]string{
				&Docker{
					Endpoint: "unix:///var/run/docker.sock",
				}: `[[inputs.docker]]
  ## Docker Endpoint
  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  #
  ## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
  gather_services = false
  #
  ## Only collect metrics for these containers, collect all if empty
  container_names = []
  #
  ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
  source_tag = false
  #
  ## Containers to include and exclude. Globs accepted.
  ## Note that an empty array for both will include all containers
  container_name_include = []
  container_name_exclude = []
  #
  ## Container states to include and exclude. Globs accepted.
  ## When empty only containers in the "running" state will be captured.
  ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  # container_state_include = []
  # container_state_exclude = []
  #
  ## Timeout for docker list, info, and stats commands
  timeout = "5s"
  #
  ## Whether to report for each container per-device blkio (8:0, 8:1...),
  ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
  ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
  ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
  ## is honored.
  perdevice = true
  #
  ## Specifies for which classes a per-device metric should be issued
  ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
  ## Please note that this setting has no effect if 'perdevice' is set to 'true'
  # perdevice_include = ["cpu"]
  #
  ## Whether to report for each container total blkio and network stats or not.
  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
  ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
  ## is honored.
  total = false
  #
  ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values.
  ## Possible values are 'cpu', 'blkio' and 'network'
  ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
  ## Please note that this setting has no effect if 'total' is set to 'false'
  # total_include = ["cpu", "blkio", "network"]
  #
  ## Which environment variables should we use as a tag
  ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
  #
  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []
  #
  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`,
				&File{
					Files: []string{
						"/var/log/**.log",
						"/var/log/apache.log",
					},
				}: `[[inputs.file]]
  ## Files to parse each interval. Accept standard unix glob matching rules,
  ## as well as ** to match recursive files and directories.
  files = ["/var/log/**.log", "/var/log/apache.log"]

  ## Name a tag containing the name of the file the data was parsed from. Leave empty
  ## to disable.
  # file_tag = ""

  ## Character encoding to use when interpreting the file contents. Invalid
  ## characters are replaced using the unicode replacement character. When set
  ## to the empty string the data is not decoded to text.
  ## ex: character_encoding = "utf-8"
  ## character_encoding = "utf-16le"
  ## character_encoding = "utf-16be"
  ## character_encoding = ""
  # character_encoding = ""

  ## The dataformat to be read from files
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`,
				&Kubernetes{URL: "http://1.1.1.1:10255"}: `[[inputs.kubernetes]]
  ## URL for the kubelet
  url = "http://1.1.1.1:10255"

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  ## If both of these are empty, we'll use the default serviceaccount:
  ## at: /run/secrets/kubernetes.io/serviceaccount/token
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## Pod labels to be added as tags. An empty array for both include and
  ## exclude will include all labels.
  # label_include = []
  # label_exclude = ["*"]

  ## Set response_timeout (default 5 seconds)
  # response_timeout = "5s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`,
				&LogParserPlugin{
					Files: []string{
						"/var/log/**.log",
						"/var/log/apache.log",
					},
				}: `[[inputs.logparser]]
  ## Log files to parse.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ## /var/log/**.log -> recursively find all .log files in /var/log
  ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
  ## /var/log/apache.log -> only tail the apache log file
  files = ["/var/log/**.log", "/var/log/apache.log"]

  ## Read files that currently exist from the beginning. Files that are created
  ## while telegraf is running (and that match the "files" globs) will always
  ## be read from the beginning.
  from_beginning = false

  ## Method used to watch for file updates. Can be either "inotify" or "poll".
  # watch_method = "inotify"

  ## Parse logstash-style "grok" patterns:
  [inputs.logparser.grok]
  ## This is a list of patterns to check the given log file(s) for.
  ## Note that adding patterns here increases processing time. The most
  ## efficient configuration is to have one pattern per logparser.
  ## Other common built-in patterns are:
  ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
  ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
  patterns = ["%{COMBINED_LOG_FORMAT}"]

  ## Name of the outputted measurement name.
  measurement = "apache_access_log"

  ## Full path(s) to custom pattern files.
  custom_pattern_files = []

  ## Custom patterns can also be defined here. Put one pattern per line.
  custom_patterns = '''
  '''

  ## Timezone allows you to provide an override for timestamps that
  ## don't already include an offset
  ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
  ##
  ## Default: "" which renders UTC
  ## Options are as follows:
  ## 1. Local -- interpret based on machine localtime
  ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
  ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
  # timezone = "Canada/Eastern"

  ## When set to "disable", timestamp will not incremented if there is a
  ## duplicate.
  # unique_timestamp = "auto"
`,
				&Nginx{
					URLs: []string{
						"http://localhost/server_status",
						"http://192.168.1.1/server_status",
					},
				}: `[[inputs.nginx]]
  # An array of Nginx stub_status URI to gather stats.
  urls = ["http://localhost/server_status", "http://192.168.1.1/server_status"]

  ## Optional TLS Config
  tls_ca = "/etc/telegraf/ca.pem"
  tls_cert = "/etc/telegraf/cert.cer"
  tls_key = "/etc/telegraf/key.key"
  ## Use TLS but skip chain & host verification
  insecure_skip_verify = false

  # HTTP response timeout (default: 5s)
  response_timeout = "5s"
`,
				&Procstat{
					Exe: "finder",
				}: `[[inputs.procstat]]
  ## PID file to monitor process
  pid_file = "/var/run/nginx.pid"
  ## executable name (ie, pgrep <exe>)
  # exe = "finder"
  ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
  # pattern = "nginx"
  ## user as argument for pgrep (ie, pgrep -u <user>)
  # user = "nginx"
  ## Systemd unit name
  # systemd_unit = "nginx.service"
  ## CGroup name or path
  # cgroup = "systemd/system.slice/nginx.service"

  ## Windows service name
  # win_service = ""

  ## override for process_name
  ## This is optional; default is sourced from /proc/<pid>/status
  # process_name = "bar"

  ## Field name prefix
  # prefix = ""

  ## When true add the full cmdline as a tag.
  # cmdline_tag = false

  ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
  # mode = "irix"

  ## Add the PID as a tag instead of as a field. When collecting multiple
  ## processes with otherwise matching tags this setting should be enabled to
  ## ensure each process has a unique identity.
  ##
  ## Enabling this option may result in a large number of series, especially
  ## when processes have a short lifetime.
  # pid_tag = false

  ## Method to use when finding process IDs. Can be one of 'pgrep', or
  ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
  ## the native finder performs the search directly in a manor dependent on the
  ## platform. Default is 'pgrep'
  # pid_finder = "pgrep"
`,
				&Prometheus{
					URLs: []string{
						"http://192.168.2.1:9090",
						"http://192.168.2.2:9090",
					},
				}: `[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = ["http://192.168.2.1:9090", "http://192.168.2.2:9090"]

  ## Metric version controls the mapping from Prometheus metrics into
  ## Telegraf metrics. When using the prometheus_client output, use the same
  ## value in both plugins to ensure metrics are round-tripped without
  ## modification.
  ##
  ## example: metric_version = 1;
  ## metric_version = 2; recommended version
  # metric_version = 1

  ## Url tag name (tag containing scrapped url. optional, default is "url")
  # url_tag = "url"

  ## An array of Kubernetes services to scrape metrics from.
  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

  ## Kubernetes config file to create client from.
  # kube_config = "/path/to/kubernetes.config"

  ## Scrape Kubernetes pods for the following prometheus annotations:
  ## - prometheus.io/scrape: Enable scraping for this pod
  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  ## set this to 'https' & most likely set the tls config.
  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  ## - prometheus.io/port: If port is not 9102 use this annotation
  # monitor_kubernetes_pods = true
  ## Get the list of pods to scrape with either the scope of
  ## - cluster: the kubernetes watch api (default, no need to specify)
  ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
  # pod_scrape_scope = "cluster"
  ## Only for node scrape scope: node IP of the node that telegraf is running on.
  ## Either this config or the environment variable NODE_IP must be set.
  # node_ip = "10.180.1.1"
  # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
  # ## Default is 60 seconds.
  # # pod_scrape_interval = 60
  ## Restricts Kubernetes monitoring to a single namespace
  ## ex: monitor_kubernetes_pods_namespace = "default"
  # monitor_kubernetes_pods_namespace = ""
  # label selector to target pods which have the label
  # kubernetes_label_selector = "env=dev,app=nginx"
  # field selector to target pods
  # eg. To scrape pods on a specific node
  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## HTTP Basic Authentication username and password. ('bearer_token' and
  ## 'bearer_token_string' take priority)
  # username = ""
  # password = ""

  ## Specify timeout duration for slower prometheus clients (default is 3s)
  # response_timeout = "3s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`,
				&Redis{
					Servers: []string{
						"tcp://localhost:6379",
						"unix:///var/run/redis.sock",
					},
					Password: "somepassword123",
				}: `[[inputs.redis]]
  ## specify servers via a url matching:
  ## [protocol://][:password]@address[:port]
  ## e.g.
  ## tcp://localhost:6379
  ## tcp://:password@192.168.99.100
  ## unix:///var/run/redis.sock
  ##
  ## If no servers are specified, then localhost is used as the host.
  ## If no port is specified, 6379 is used
  servers = ["tcp://localhost:6379", "unix:///var/run/redis.sock"]

  ## Optional. Specify redis commands to retrieve values
  # [[inputs.redis.commands]]
  # # The command to run where each argument is a separate element
  # command = ["get", "sample-key"]
  # # The field to store the result in
  # field = "sample-key-value"
  # # The type of the result
  # # Can be "string", "integer", or "float"
  # type = "string"

  ## specify server password
  # password = "somepassword123"

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = true
`,
				&Syslog{
					Address: "tcp://10.0.0.1:6514",
				}: `[[inputs.syslog]]
  ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
  ## Protocol, address and port to host the syslog receiver.
  ## If no host is specified, then localhost is used.
  ## If no port is specified, 6514 is used (RFC5425#section-4.1).
  server = "tcp://10.0.0.1:6514"

  ## TLS Config
  # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"

  ## Period between keep alive probes.
  ## 0 disables keep alive probes.
  ## Defaults to the OS configuration.
  ## Only applies to stream sockets (e.g. TCP).
  # keep_alive_period = "5m"

  ## Maximum number of concurrent connections (default = 0).
  ## 0 means unlimited.
  ## Only applies to stream sockets (e.g. TCP).
  # max_connections = 1024

  ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
  ## 0 means unlimited.
  # read_timeout = "5s"

  ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
  ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
  ## or the non-transparent framing technique (RFC6587#section-3.4.2).
  ## Must be one of "octet-counting", "non-transparent".
  # framing = "octet-counting"

  ## The trailer to be expected in case of non-transparent framing (default = "LF").
  ## Must be one of "LF", or "NUL".
  # trailer = "LF"

  ## Whether to parse in best effort mode or not (default = false).
  ## By default best effort parsing is off.
  # best_effort = false

  ## Character to prepend to SD-PARAMs (default = "_").
  ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
  ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
  ## For each combination a field is created.
  ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
  # sdparam_separator = "_"
`,
				&Tail{
					Files: []string{"/var/log/**.log", "/var/log/apache.log"},
				}: `[[inputs.tail]]
  ## File names or a pattern to tail.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ## "/var/log/**.log" -> recursively find all .log files in /var/log
  ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
  ## "/var/log/apache.log" -> just tail the apache log file
  ## "/var/log/log[!1-2]* -> tail files without 1-2
  ## "/var/log/log[^1-2]* -> identical behavior as above
  ## See https://github.com/gobwas/glob for more examples
  ##
  files = ["/var/log/**.log", "/var/log/apache.log"]

  ## Read file from beginning.
  # from_beginning = false

  ## Whether file is a named pipe
  # pipe = false

  ## Method used to watch for file updates. Can be either "inotify" or "poll".
  # watch_method = "inotify"

  ## Maximum lines of the file to process that have not yet be written by the
  ## output. For best throughput set based on the number of metrics on each
  ## line and the size of the output's metric_batch_size.
  # max_undelivered_lines = 1000

  ## Character encoding to use when interpreting the file contents. Invalid
  ## characters are replaced using the unicode replacement character. When set
  ## to the empty string the data is not decoded to text.
  ## ex: character_encoding = "utf-8"
  ## character_encoding = "utf-16le"
  ## character_encoding = "utf-16be"
  ## character_encoding = ""
  # character_encoding = ""

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

  ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
  # path_tag = "path"

  ## multiline parser/codec
  ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
  #[inputs.tail.multiline]
  ## The pattern should be a regexp which matches what you believe to be an
  ## indicator that the field is part of an event consisting of multiple lines of log data.
  #pattern = "^\s"

  ## This field must be either "previous" or "next".
  ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
  ## whereas "next" indicates that the line belongs to the next one.
  #match_which_line = "previous"

  ## The invert_match field can be true or false (defaults to false).
  ## If true, a message not matching the pattern will constitute a match of the multiline
  ## filter and the what will be applied. (vice-versa is also true)
  #invert_match = false

  ## After the specified timeout, this plugin sends a multiline event even if no new pattern
  ## is found to start a new event. The default timeout is 5s.
  #timeout = 5s
`,
			},
		},
	}
	for _, c := range cases {
		for input, toml := range c.plugins {
			if toml != input.TOML() {
				t.Fatalf("%s failed want %s, got %v", c.name, toml, input.TOML())
			}
		}
	}
}

// TestDecodeTOML verifies that UnmarshalTOML populates each plugin from
// generic decoded data, and returns the expected error for malformed input.
func TestDecodeTOML(t *testing.T) {
	cases := []struct {
		name    string
		want    telegrafPluginConfig
		wantErr error
		input   telegrafPluginConfig
		data    interface{}
	}{
		{
			name:  "cpu",
			want:  &CPUStats{},
			input: &CPUStats{},
		},
		{
			name:  "disk",
			want:  &DiskStats{},
			input: &DiskStats{},
		},
		{
			name:  "diskio",
			want:  &DiskIO{},
			input: &DiskIO{},
		},
		{
			name:    "docker bad data",
			want:    &Docker{},
			wantErr: errors.New("bad endpoint for docker input plugin"),
			input:   &Docker{},
			data:    map[string]int{},
		},
		{
			name: "docker",
			want: &Docker{
				Endpoint: "unix:///var/run/docker.sock",
			},
			input: &Docker{},
			data: map[string]interface{}{
				"endpoint": "unix:///var/run/docker.sock",
			},
		},
		{
			name:    "file empty",
			want:    &File{},
			wantErr: errors.New("bad files for file input plugin"),
			input:   &File{},
		},
		{
			name:    "file bad data not array",
			want:    &File{},
			wantErr: errors.New("not an array for file input plugin"),
			input:   &File{},
			data: map[string]interface{}{
				"files": "",
			},
		},
		{
			name: "file",
			want: &File{
				Files: []string{
					"/var/log/**.log",
					"/var/log/apache.log",
				},
			},
			input: &File{},
			data: map[string]interface{}{
				"files": []interface{}{
					"/var/log/**.log",
					"/var/log/apache.log",
				},
			},
		},
		{
			name:  "kernel",
			want:  &Kernel{},
			input: &Kernel{},
		},
		{
			name:    "kubernetes empty",
			want:    &Kubernetes{},
			wantErr: errors.New("bad url for kubernetes input plugin"),
			input:   &Kubernetes{},
		},
		{
			name: "kubernetes",
			want: &Kubernetes{
				URL: "http://1.1.1.1:10255",
			},
			input: &Kubernetes{},
			data: map[string]interface{}{
				"url": "http://1.1.1.1:10255",
			},
		},
		{
			name:    "logparser empty",
			want:    &LogParserPlugin{},
			wantErr: errors.New("bad files for logparser input plugin"),
			input:   &LogParserPlugin{},
		},
		{
			name:    "logparser file not array",
			want:    &LogParserPlugin{},
			wantErr: errors.New("files is not an array for logparser input plugin"),
			input:   &LogParserPlugin{},
			data: map[string]interface{}{
				"files": "ok",
			},
		},
		{
			name: "logparser",
			want: &LogParserPlugin{
				Files: []string{
					"/var/log/**.log",
					"/var/log/apache.log",
				},
			},
			input: &LogParserPlugin{},
			data: map[string]interface{}{
				"files": []interface{}{
					"/var/log/**.log",
					"/var/log/apache.log",
				},
			},
		},
		{
			name:  "mem",
			want:  &MemStats{},
			input: &MemStats{},
		},
		{
			name:  "net_response",
			want:  &NetResponse{},
			input: &NetResponse{},
		},
		{
			name:  "net",
			want:  &NetIOStats{},
			input: &NetIOStats{},
		},
		{
			name:    "nginx empty",
			want:    &Nginx{},
			wantErr: errors.New("bad urls for nginx input plugin"),
			input:   &Nginx{},
		},
		{
			name:    "nginx bad data not array",
			want:    &Nginx{},
			wantErr: errors.New("urls is not an array for nginx input plugin"),
			input:   &Nginx{},
			data: map[string]interface{}{
				"urls": "",
			},
		},
		{
			name: "nginx",
			want: &Nginx{
				URLs: []string{
					"http://localhost/server_status",
					"http://192.168.1.1/server_status",
				},
			},
			input: &Nginx{},
			data: map[string]interface{}{
				"urls": []interface{}{
					"http://localhost/server_status",
					"http://192.168.1.1/server_status",
				},
			},
		},
		{
			name:  "processes",
			want:  &Processes{},
			input: &Processes{},
		},
		{
			name:    "procstat empty",
			want:    &Procstat{},
			wantErr: errors.New("bad exe for procstat input plugin"),
			input:   &Procstat{},
		},
		{
			name: "procstat",
			want: &Procstat{
				Exe: "finder",
			},
			input: &Procstat{},
			data: map[string]interface{}{
				"exe": "finder",
			},
		},
		{
			name:    "prometheus empty",
			want:    &Prometheus{},
			wantErr: errors.New("bad urls for prometheus input plugin"),
			input:   &Prometheus{},
		},
		{
			name:    "prometheus bad data not array",
			want:    &Prometheus{},
			wantErr: errors.New("urls is not an array for prometheus input plugin"),
			input:   &Prometheus{},
			data: map[string]interface{}{
				"urls": "",
			},
		},
		{
			name: "prometheus",
			want: &Prometheus{
				URLs: []string{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
			},
			input: &Prometheus{},
			data: map[string]interface{}{
				"urls": []interface{}{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
			},
		},
		{
			name:    "redis empty",
			want:    &Redis{},
			wantErr: errors.New("bad servers for redis input plugin"),
			input:   &Redis{},
		},
		{
			name:    "redis bad data not array",
			want:    &Redis{},
			wantErr: errors.New("servers is not an array for redis input plugin"),
			input:   &Redis{},
			data: map[string]interface{}{
				"servers": "",
			},
		},
		{
			name: "redis without password",
			want: &Redis{
				Servers: []string{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
			},
			input: &Redis{},
			data: map[string]interface{}{
				"servers": []interface{}{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
			},
		},
		{
			name: "redis with password",
			want: &Redis{
				Servers: []string{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
				Password: "pass1",
			},
			input: &Redis{},
			data: map[string]interface{}{
				"servers": []interface{}{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
				"password": "pass1",
			},
		},
		{
			name:  "swap",
			want:  &SwapStats{},
			input: &SwapStats{},
		},
		{
			name:    "syslog empty",
			want:    &Syslog{},
			wantErr: errors.New("bad server for syslog input plugin"),
			input:   &Syslog{},
		},
		{
			name: "syslog",
			want: &Syslog{
				Address: "http://1.1.1.1:10255",
			},
			input: &Syslog{},
			data: map[string]interface{}{
				"server": "http://1.1.1.1:10255",
			},
		},
		{
			name:  "system",
			want:  &SystemStats{},
			input: &SystemStats{},
		},
		{
			name:    "tail empty",
			want:    &Tail{},
			wantErr: errors.New("bad files for tail input plugin"),
			input:   &Tail{},
		},
		{
			name:    "tail bad data not array",
			want:    &Tail{},
			wantErr: errors.New("not an array for tail input plugin"),
			input:   &Tail{},
			data: map[string]interface{}{
				"files": "",
			},
		},
		{
			name: "tail",
			want: &Tail{
				Files: []string{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
			},
			input: &Tail{},
			data: map[string]interface{}{
				"files": []interface{}{
					"http://192.168.2.1:9090",
					"http://192.168.2.2:9090",
				},
			},
		},
	}
	for _, c := range cases {
		err := c.input.UnmarshalTOML(c.data)
		if c.wantErr != nil && (err == nil || err.Error() != c.wantErr.Error()) {
			t.Fatalf("%s failed want err %s, got %v", c.name, c.wantErr.Error(), err)
		}
		if c.wantErr == nil && err != nil {
			t.Fatalf("%s failed want err nil, got %v", c.name, err)
		}
		if !reflect.DeepEqual(c.input, c.want) {
			t.Fatalf("%s failed want %v, got %v", c.name, c.want, c.input)
		}
	}
}
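
// The two tests above exercise encoding and decoding separately. The sketch
// below combines them into a single round trip as an illustration; it is not
// part of the original suite, and the name TestTOMLRoundTrip is hypothetical.
// It relies only on behavior already asserted above: UnmarshalTOML populating
// Kubernetes.URL, and TOML() embedding that value in the rendered config.
func TestTOMLRoundTrip(t *testing.T) {
	input := &Kubernetes{}
	// Decode generic TOML data into the plugin, as TestDecodeTOML does.
	if err := input.UnmarshalTOML(map[string]interface{}{
		"url": "http://1.1.1.1:10255",
	}); err != nil {
		t.Fatalf("round trip failed to decode: %v", err)
	}
	want := &Kubernetes{URL: "http://1.1.1.1:10255"}
	if !reflect.DeepEqual(input, want) {
		t.Fatalf("round trip decode want %v, got %v", want, input)
	}
	// Re-encoding the decoded plugin should render the same TOML as encoding
	// the expected value directly, as TestEncodeTOML does.
	if got := input.TOML(); got != want.TOML() {
		t.Fatalf("round trip encode want %s, got %s", want.TOML(), got)
	}
}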