github.com/vipernet-xyz/tm@v0.34.24/networks/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 (about)

     1  #======================== Journalbeat Configuration ============================
     2  
     3  journalbeat:
     4    # What position in journald to seek to at start up
     5    # options: cursor, tail, head (defaults to tail)
     6    #seek_position: tail
     7  
     8    # If seek_position is set to cursor and seeking to cursor fails
     9    # fall back to this method.  If set to none it will exit
    10    # options: tail, head, none (defaults to tail)
    11    #cursor_seek_fallback: tail
    12  
    13    # Store the cursor of the successfully published events
    14    #write_cursor_state: true
    15  
    16    # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state")
    17    #cursor_state_file: .journalbeat-cursor-state
    18  
    19    # How frequently should we save the cursor to disk (defaults to 5s)
    20    #cursor_flush_period: 5s
    21  
    22    # Path to the file to store the queue of events pending (defaults to ".journalbeat-pending-queue")
    23    #pending_queue.file: .journalbeat-pending-queue
    24  
    25    # How frequently should we save the queue to disk (defaults to 1s).
    26    # Pending queue represents the WAL of events queued to be published
    27    # or being published and waiting for acknowledgement. In case of a
    28    # regular restart of journalbeat all the events not yet acknowledged
    29    # will be flushed to disk during the shutdown.
    30    # In case of disaster journalbeat most probably won't get a chance to shut
    31    # itself down gracefully and this flush period option will serve you as a
    32    # backup creation frequency option.
    33    #pending_queue.flush_period: 1s
    34  
    35    # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message"
    36    # (defaults to false)
    37    #clean_field_names: false
    38  
    39    # All journal entries are strings by default. You can try to convert them to numbers.
    40    # (defaults to false)
    41    #convert_to_numbers: false
    42  
    43    # Store all the fields of the Systemd Journal entry under this field
    44    # Can be almost any string suitable to be a field name of an ElasticSearch document.
    45    # Dots can be used to create nested fields.
    46    # Two exceptions:
    47    #  - no repeated dots;
    48    #  - no trailing dots, e.g. "journal..field_name." will fail
    49    # (defaults to "" hence stores on the upper level of the event)
    50    #move_metadata_to_field: ""
    51  
    52    # Specific units to monitor.
    53    units: ["{{service}}.service"]
    54  
    55    # Specify Journal paths to open. You can pass an array of paths to Systemd Journal paths.
    56    # If you want to open Journal from directory just pass an array consisting of one element
    57    # representing the path. See: https://www.freedesktop.org/software/systemd/man/sd_journal_open.html
    58    # By default this setting is empty thus journalbeat will attempt to find all journal files automatically
    59    #journal_paths: ["/var/log/journal"]
    60  
    61    #default_type: journal
    62  
    63  #================================ General ======================================
    64  
    65  # The name of the shipper that publishes the network data. It can be used to group
    66  # all the transactions sent by a single shipper in the web interface.
    67  # If this option is not defined, the hostname is used.
    68  #name: journalbeat
    69  
    70  # The tags of the shipper are included in their own field with each
    71  # transaction published. Tags make it easy to group servers by different
    72  # logical properties.
    73  tags: ["{{service}}"]
    74  
    75  # Optional fields that you can specify to add additional information to the
    76  # output. Fields can be scalar values, arrays, dictionaries, or any nested
    77  # combination of these.
    78  fields:
    79    logzio_codec: plain
    80    token: "{{LOGZIO_TOKEN}}"
    81  
    82  # If this option is set to true, the custom fields are stored as top-level
    83  # fields in the output document instead of being grouped under a fields
    84  # sub-dictionary. Default is false.
    85  fields_under_root: true
    86  
    87  # Internal queue size for single events in processing pipeline
    88  #queue_size: 1000
    89  
    90  # The internal queue size for bulk events in the processing pipeline.
    91  # Do not modify this value.
    92  #bulk_queue_size: 0
    93  
    94  # Sets the maximum number of CPUs that can be executing simultaneously. The
    95  # default is the number of logical CPUs available in the system.
    96  #max_procs:
    97  
    98  #================================ Processors ===================================
    99  
   100  # Processors are used to reduce the number of fields in the exported event or to
   101  # enhance the event with external metadata. This section defines a list of
   102  # processors that are applied one by one and the first one receives the initial
   103  # event:
   104  #
   105  #   event -> filter1 -> event1 -> filter2 ->event2 ...
   106  #
   107  # The supported processors are drop_fields, drop_event, include_fields, and
   108  # add_cloud_metadata.
   109  #
   110  # For example, you can use the following processors to keep the fields that
   111  # contain CPU load percentages, but remove the fields that contain CPU ticks
   112  # values:
   113  #
   114  processors:
   115  #- include_fields:
   116  #    fields: ["cpu"]
   117  - drop_fields:
   118      fields: ["beat.name", "beat.version", "logzio_codec", "SYSLOG_IDENTIFIER", "SYSLOG_FACILITY", "PRIORITY"]
   119  #
   120  # The following example drops the events that have the HTTP response code 200:
   121  #
   122  #processors:
   123  #- drop_event:
   124  #    when:
   125  #       equals:
   126  #           http.code: 200
   127  #
   128  # The following example enriches each event with metadata from the cloud
   129  # provider about the host machine. It works on EC2, GCE, and DigitalOcean.
   130  #
   131  #processors:
   132  #- add_cloud_metadata:
   133  #
   134  
   135  #================================ Outputs ======================================
   136  
   137  # Configure what outputs to use when sending the data collected by the beat.
   138  # Multiple outputs may be used.
   139  
   140  #----------------------------- Logstash output ---------------------------------
   141  output.logstash:
   142    # Boolean flag to enable or disable the output module.
   143    enabled: true
   144  
   145    # The Logstash hosts
   146    hosts: ["listener.logz.io:5015"]
   147  
   148    # Number of workers per Logstash host.
   149    #worker: 1
   150  
   151    # Set gzip compression level.
   152    #compression_level: 3
   153  
   154    # Optional load balance the events between the Logstash hosts
   155    #loadbalance: true
   156  
   157    # Number of batches to be sent asynchronously to logstash while processing
   158    # new batches.
   159    #pipelining: 0
   160  
   161    # Optional index name. The default index name is set to name of the beat
   162    # in all lowercase.
   163    #index: 'beatname'
   164  
   165    # SOCKS5 proxy server URL
   166    #proxy_url: socks5://user:password@socks5-server:2233
   167  
   168    # Resolve names locally when using a proxy server. Defaults to false.
   169    #proxy_use_local_resolver: false
   170  
   171    # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
   172    ssl.enabled: true
   173  
   174    # Configure SSL verification mode. If `none` is configured, all server hosts
   175    # and certificates will be accepted. In this mode, SSL based connections are
   176    # susceptible to man-in-the-middle attacks. Use only for testing. Default is
   177    # `full`.
   178    ssl.verification_mode: full
   179  
   180    # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
   181    # 1.2 are enabled.
   182    #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
   183  
   184    # Optional SSL configuration options. SSL is off by default.
   185    # List of root certificates for HTTPS server verifications
   186    ssl.certificate_authorities: ["/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt"]
   187  
   188    # Certificate for SSL client authentication
   189    #ssl.certificate: "/etc/pki/client/cert.pem"
   190  
   191    # Client Certificate Key
   192    #ssl.key: "/etc/pki/client/cert.key"
   193  
   194    # Optional passphrase for decrypting the Certificate Key.
   195    #ssl.key_passphrase: ''
   196  
   197    # Configure cipher suites to be used for SSL connections
   198    #ssl.cipher_suites: []
   199  
   200    # Configure curve types for ECDHE based cipher suites
   201    #ssl.curve_types: []
   202  
   203  #------------------------------- File output -----------------------------------
   204  #output.file:
   205    # Boolean flag to enable or disable the output module.
   206    #enabled: true
   207  
   208    # Path to the directory where to save the generated files. The option is
   209    # mandatory.
   210    #path: "/tmp/beatname"
   211  
   212    # Name of the generated files. The default is `beatname` and it generates
   213    # files: `beatname`, `beatname.1`, `beatname.2`, etc.
   214    #filename: beatname
   215  
   216    # Maximum size in kilobytes of each file. When this size is reached, and on
   217    # every beatname restart, the files are rotated. The default value is 10240
   218    # kB.
   219    #rotate_every_kb: 10000
   220  
   221    # Maximum number of files under path. When this number of files is reached,
   222    # the oldest file is deleted and the rest are shifted from last to first. The
   223    # default is 7 files.
   224    #number_of_files: 7
   225  
   226  
   227  #----------------------------- Console output ---------------------------------
   228  #output.console:
   229    # Boolean flag to enable or disable the output module.
   230    #enabled: true
   231  
   232    # Pretty print json event
   233    #pretty: false
   234  
   235  #================================= Paths ======================================
   236  
   237  # The home path for the beatname installation. This is the default base path
   238  # for all other path settings and for miscellaneous files that come with the
   239  # distribution (for example, the sample dashboards).
   240  # If not set by a CLI flag or in the configuration file, the default for the
   241  # home path is the location of the binary.
   242  #path.home:
   243  
   244  # The configuration path for the beatname installation. This is the default
   245  # base path for configuration files, including the main YAML configuration file
   246  # and the Elasticsearch template file. If not set by a CLI flag or in the
   247  # configuration file, the default for the configuration path is the home path.
   248  #path.config: ${path.home}
   249  
   250  # The data path for the beatname installation. This is the default base path
   251  # for all the files in which beatname needs to store its data. If not set by a
   252  # CLI flag or in the configuration file, the default for the data path is a data
   253  # subdirectory inside the home path.
   254  #path.data: ${path.home}/data
   255  
   256  # The logs path for a beatname installation. This is the default location for
   257  # the Beat's log files. If not set by a CLI flag or in the configuration file,
   258  # the default for the logs path is a logs subdirectory inside the home path.
   259  #path.logs: ${path.home}/logs
   260  
   261  #============================== Dashboards =====================================
   262  # These settings control loading the sample dashboards to the Kibana index. Loading
   263  # the dashboards is disabled by default and can be enabled either by setting the
   264  # options here, or by using the `-setup` CLI flag.
   265  #dashboards.enabled: false
   266  
   267  # The URL from where to download the dashboards archive. By default this URL
   268  # has a value which is computed based on the Beat name and version. For released
   269  # versions, this URL points to the dashboard archive on the artifacts.elastic.co
   270  # website.
   271  #dashboards.url:
   272  
   273  # The directory from where to read the dashboards. It is used instead of the URL
   274  # when it has a value.
   275  #dashboards.directory:
   276  
   277  # The file archive (zip file) from where to read the dashboards. It is used instead
   278  # of the URL when it has a value.
   279  #dashboards.file:
   280  
   281  # If this option is enabled, the snapshot URL is used instead of the default URL.
   282  #dashboards.snapshot: false
   283  
   284  # The URL from where to download the snapshot version of the dashboards. By default
   285  # this has a value which is computed based on the Beat name and version.
   286  #dashboards.snapshot_url:
   287  
   288  # In case the archive contains the dashboards from multiple Beats, this lets you
   289  # select which one to load. You can load all the dashboards in the archive by
   290  # setting this to the empty string.
   291  #dashboards.beat: beatname
   292  
   293  # The name of the Kibana index to use for setting the configuration. Default is ".kibana"
   294  #dashboards.kibana_index: .kibana
   295  
   296  # The Elasticsearch index name. This overwrites the index name defined in the
   297  # dashboards and index pattern. Example: testbeat-*
   298  #dashboards.index:
   299  
   300  #================================ Logging ======================================
   301  # There are three options for the log output: syslog, file, stderr.
   302  # Under Windows systems, the log files are per default sent to the file output,
   303  # under all other systems by default to syslog.
   304  
   305  # Sets log level. The default log level is info.
   306  # Available log levels are: critical, error, warning, info, debug
   307  #logging.level: info
   308  
   309  # Enable debug output for selected components. To enable all selectors use ["*"]
   310  # Other available selectors are "beat", "publish", "service"
   311  # Multiple selectors can be chained.
   312  #logging.selectors: [ ]
   313  
   314  # Send all logging output to syslog. The default is false.
   315  #logging.to_syslog: true
   316  
   317  # If enabled, beatname periodically logs its internal metrics that have changed
   318  # in the last period. For each metric that changed, the delta from the value at
   319  # the beginning of the period is logged. Also, the total values for
   320  # all non-zero internal metrics are logged on shutdown. The default is true.
   321  #logging.metrics.enabled: true
   322  
   323  # The period after which to log the internal metrics. The default is 30s.
   324  #logging.metrics.period: 30s
   325  
   326  # Logging to rotating files. Set logging.to_files to false to disable logging to
   327  # files.
   328  logging.to_files: true
   329  logging.files:
   330    # Configure the path where the logs are written. The default is the logs directory
   331    # under the home path (the binary location).
   332    #path: /var/log/beatname
   333  
   334    # The name of the files where the logs are written to.
   335    #name: beatname
   336  
   337    # Configure log file size limit. If limit is reached, log file will be
   338    # automatically rotated
   339    #rotateeverybytes: 10485760 # = 10MB
   340  
   341    # Number of rotated log files to keep. Oldest files will be deleted first.
   342    #keepfiles: 7