github.com/NVIDIA/aistore@v1.3.23-0.20240517131212-7df6609be51d/bench/tools/aisloader-composer/containers/carbon_config/carbon.conf (about)

     1  [cache]
     2  # Configure carbon directories.
     3  #
     4  # OS environment variables can be used to tell carbon where graphite is
     5  # installed, where to read configuration from and where to write data.
     6  #
     7  #   GRAPHITE_ROOT        - Root directory of the graphite installation.
     8  #                          Defaults to ../
     9  #   GRAPHITE_CONF_DIR    - Configuration directory (where this file lives).
    10  #                          Defaults to $GRAPHITE_ROOT/conf/
    11  #   GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
    12  #                          Defaults to $GRAPHITE_ROOT/storage/
    13  #
    14  # To change other directory paths, add settings to this file. The following
    15  # configuration variables are available with these default values:
    16  #
    17  #   STORAGE_DIR    = $GRAPHITE_STORAGE_DIR
    18  #   LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/
    19  #   WHITELISTS_DIR = %(STORAGE_DIR)s/lists/
    20  #   CONF_DIR       = %(STORAGE_DIR)s/conf/
    21  #   LOG_DIR        = %(STORAGE_DIR)s/log/
    22  #   PID_DIR        = %(STORAGE_DIR)s/
    23  #
    24  # For FHS style directory structures, use:
    25  #
    26  #   STORAGE_DIR    = /var/lib/carbon/
    27  #   CONF_DIR       = /etc/carbon/
    28  #   LOG_DIR        = /var/log/carbon/
    29  #   PID_DIR        = /var/run/
    30  #
    31  #LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
    32  
    33  # Specify the database library used to store metric data on disk. Each database
    34  # may have configurable options to change the behavior of how it writes to
    35  # persistent storage.
    36  #
    37  # whisper - Fixed-size database, similar in design and purpose to RRD. This is
    38  # the default storage backend for carbon and the most rigorously tested.
    39  #
    40  # ceres - Experimental alternative database that supports storing data in sparse
    41  # files of arbitrary fixed-size resolutions.
    42  DATABASE = whisper
    43  
    44  # Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no
    45  # longer exists (i.e. it is removed or renamed)
    46  ENABLE_LOGROTATION = True
    47  
    48  # Specify the user to drop privileges to
    49  # If this is blank carbon-cache runs as the user that invokes it
    50  # This user must have write access to the local data directory
    51  USER =
    52  
    53  # Limit the size of the cache to avoid swapping or becoming CPU bound.
    54  # Sorting and serving cache queries get more expensive as the cache grows.
    55  # Use the value "inf" (infinity) for an unlimited cache size.
    56  # value should be an integer number of metric datapoints.
    57  MAX_CACHE_SIZE = inf
    58  
    59  # Limits the number of whisper update_many() calls per second, which effectively
    60  # means the number of write requests sent to the disk. This is intended to
    61  # prevent over-utilizing the disk and thus starving the rest of the system.
    62  # When the rate of required updates exceeds this, then carbon's caching will
    63  # take effect and increase the overall throughput accordingly.
    64  MAX_UPDATES_PER_SECOND = 2000
    65  
    66  # If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
    67  # stop/shutdown is initiated.  This helps when MAX_UPDATES_PER_SECOND is
    68  # relatively low and carbon has cached a lot of updates; it enables the carbon
    69  # daemon to shutdown more quickly.
    70  # MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000
    71  
    72  # Softly limits the number of whisper files that get created each minute.
    73  # Setting this value low (e.g. 50) is a good way to ensure that your carbon
    74  # system will not be adversely impacted when a bunch of new metrics are
    75  # sent to it. The trade off is that any metrics received in excess of this
    76  # value will be silently dropped, and the whisper file will not be created
    77  # until such point as a subsequent metric is received and fits within the
    78  # defined rate limit. Setting this value high (like "inf" for infinity) will
    79  # cause carbon to create the files quickly but at the risk of increased I/O.
    80  MAX_CREATES_PER_MINUTE = inf
    81  
    82  # Set the minimum timestamp resolution supported by this instance. This allows
    83  # internal optimisations by overwriting points with equal truncated timestamps
    84  # in order to limit the number of updates to the database. It defaults to one
    85  # second.
    86  MIN_TIMESTAMP_RESOLUTION = 1
    87  
    88  # Set the minimum lag in seconds for a point to be written to the database
    89  # in order to optimize batching. This means that each point will wait at least
    90  # the duration of this lag before being written. Setting this to 0 disables the feature.
    91  # This currently only works when using the timesorted write strategy.
    92  # MIN_TIMESTAMP_LAG = 0
    93  
    94  # Set the interface and port for the line (plain text) listener.  Setting the
    95  # interface to 0.0.0.0 listens on all interfaces.  Port can be set to 0 to
    96  # disable this listener if it is not required.
    97  LINE_RECEIVER_INTERFACE = 0.0.0.0
    98  LINE_RECEIVER_PORT = 2003
    99  
   100  # Set this to True to enable the UDP listener. By default this is off
   101  # because it is very common to run multiple carbon daemons and managing
   102  # another (rarely used) port for every carbon instance is not fun.
   103  ENABLE_UDP_LISTENER = False
   104  UDP_RECEIVER_INTERFACE = 0.0.0.0
   105  UDP_RECEIVER_PORT = 2003
   106  
   107  # Set the interface and port for the pickle listener.  Setting the interface to
   108  # 0.0.0.0 listens on all interfaces.  Port can be set to 0 to disable this
   109  # listener if it is not required.
   110  PICKLE_RECEIVER_INTERFACE = 0.0.0.0
   111  PICKLE_RECEIVER_PORT = 2004
   112  
   113  # Set the interface and port for the protobuf listener.  Setting the interface to
   114  # 0.0.0.0 listens on all interfaces.  Port can be set to 0 to disable this
   115  # listener if it is not required.
   116  # PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0
   117  # PROTOBUF_RECEIVER_PORT = 2005
   118  
   119  # Limit the number of open connections the receiver can handle at any time.
   120  # Default is no limit. Setting up a limit for sites handling high volume
   121  # traffic may be recommended to avoid running out of TCP memory or having
   122  # thousands of TCP connections reduce the throughput of the service.
   123  #MAX_RECEIVER_CONNECTIONS = inf
   124  
   125  # Per security concerns outlined in Bug #817247 the pickle receiver
   126  # will use a more secure and slightly less efficient unpickler.
   127  # Set this to True to revert to the old-fashioned insecure unpickler.
   128  USE_INSECURE_UNPICKLER = False
   129  
   130  CACHE_QUERY_INTERFACE = 0.0.0.0
   131  CACHE_QUERY_PORT = 7002
   132  
   133  # Set this to False to drop datapoints received after the cache
   134  # reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
   135  # over which metrics are received will temporarily stop accepting
   136  # data until the cache size falls below 95% MAX_CACHE_SIZE.
   137  USE_FLOW_CONTROL = True
   138  
   139  # If enabled this setting is used to timeout metric client connection if no
   140  # metrics have been sent in specified time in seconds
   141  #METRIC_CLIENT_IDLE_TIMEOUT = None
   142  
   143  # By default, carbon-cache will log every whisper update and cache hit.
   144  # This can be excessive and degrade performance if logging on the same
   145  # volume as the whisper data is stored.
   146  LOG_UPDATES = False
   147  LOG_CREATES = False
   148  LOG_CACHE_HITS = False
   149  LOG_CACHE_QUEUE_SORTS = False
   150  
   151  # The thread that writes metrics to disk can use one of the following strategies
   152  # determining the order in which metrics are removed from cache and flushed to
   153  # disk. The default option preserves the same behavior as has been historically
   154  # available in version 0.9.10.
   155  #
   156  # sorted - All metrics in the cache will be counted and an ordered list of
   157  # them will be sorted according to the number of datapoints in the cache at the
   158  # moment of the list's creation. Metrics will then be flushed from the cache to
   159  # disk in that order.
   160  #
   161  # timesorted - All metrics in the list will be looked at and sorted according
   162  # to the timestamp of their datapoints. The metrics that were the least recently
   163  # written will be written first. This is a hybrid strategy between max and
   164  # sorted which is particularly adapted to sets of metrics with non-uniform
   165  # resolutions.
   166  #
   167  # max - The writer thread will always pop and flush the metric from cache
   168  # that has the most datapoints. This will give a strong flush preference to
   169  # frequently updated metrics and will also reduce random file-io. Infrequently
   170  # updated metrics may only ever be persisted to disk at daemon shutdown if
   171  # there are a large number of metrics which receive very frequent updates OR if
   172  # disk i/o is very slow.
   173  #
   174  # naive - Metrics will be flushed from the cache to disk in an unordered
   175  # fashion. This strategy may be desirable in situations where the storage for
   176  # whisper files is solid state, CPU resources are very limited or deference to
   177  # the OS's i/o scheduler is expected to compensate for the random write
   178  # pattern.
   179  #
   180  CACHE_WRITE_STRATEGY = sorted
   181  
   182  # On some systems it is desirable for whisper to write synchronously.
   183  # Set this option to True if you'd like to try this. Basically it will
   184  # shift the onus of buffering writes from the kernel into carbon's cache.
   185  WHISPER_AUTOFLUSH = False
   186  
   187  # By default new Whisper files are created pre-allocated with the data region
   188  # filled with zeros to prevent fragmentation and speed up contiguous reads and
   189  # writes (which are common). Enabling this option will cause Whisper to create
   190  # the file sparsely instead. Enabling this option may allow a large increase of
   191  # MAX_CREATES_PER_MINUTE but may have longer term performance implications
   192  # depending on the underlying storage configuration.
   193  WHISPER_SPARSE_CREATE = False
   194  
   195  # Only beneficial on linux filesystems that support the fallocate system call.
   196  # It maintains the benefits of contiguous reads/writes, but with a potentially
   197  # much faster creation speed, by allowing the kernel to handle the block
   198  # allocation and zero-ing. Enabling this option may allow a large increase of
   199  # MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
   200  # this option will gracefully fallback to standard POSIX file access methods.
   201  # If enabled, disables WHISPER_SPARSE_CREATE regardless of the value.
   202  WHISPER_FALLOCATE_CREATE = True
   203  
   204  # Enabling this option will cause Whisper to lock each Whisper file it writes
   205  # to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
   206  # multiple carbon-cache daemons are writing to the same files.
   207  # WHISPER_LOCK_WRITES = False
   208  
   209  # On systems which have a large number of metrics, the page writeback caused by
   210  # a large volume of Whisper write(2)'s can cause disk thrashing due to memory shortage, so that abnormal
   211  # disk reads occur. Enabling this option makes it possible to decrease useless
   212  # page cache memory by posix_fadvise(2) with POSIX_FADVISE_RANDOM option.
   213  # WHISPER_FADVISE_RANDOM = False
   214  
   215  # By default all nodes stored in Ceres are cached in memory to improve the
   216  # throughput of reads and writes to underlying slices. Turning this off will
   217  # greatly reduce memory consumption for databases with millions of metrics, at
   218  # the cost of a steep increase in disk i/o, approximately an extra two os.stat
   219  # calls for every read and write. Reasons to do this are if the underlying
   220  # storage can handle stat() with practically zero cost (SSD, NVMe, zRAM).
   221  # Valid values are:
   222  #       all - all nodes are cached
   223  #      none - node caching is disabled
   224  # CERES_NODE_CACHING_BEHAVIOR = all
   225  
   226  # Ceres nodes can have many slices and caching the right ones can improve
   227  # performance dramatically. Note that there are many trade-offs to tinkering
   228  # with this, and unless you are a ceres developer you *really* should not
   229  # mess with this. Valid values are:
   230  #    latest - only the most recent slice is cached
   231  #       all - all slices are cached
   232  #      none - slice caching is disabled
   233  # CERES_SLICE_CACHING_BEHAVIOR = latest
   234  
   235  # If a Ceres node accumulates too many slices, performance can suffer.
   236  # This can be caused by intermittently reported data. To mitigate
   237  # slice fragmentation there is a tolerance for how much space can be
   238  # wasted within a slice file to avoid creating a new one. That tolerance
   239  # level is determined by MAX_SLICE_GAP, which is the number of consecutive
   240  # null datapoints allowed in a slice file.
   241  # If you set this very low, you will waste less of the *tiny* bit of disk space
   242  # that this feature wastes, and you will be prone to performance problems
   243  # caused by slice fragmentation, which can be pretty severe.
   244  # If you set this really high, you will waste a bit more disk space (each
   245  # null datapoint wastes 8 bytes, but keep in mind your filesystem's block
   246  # size). If you suffer slice fragmentation issues, you should increase this or
   247  # run the ceres-maintenance defrag plugin more often. However you should not
   248  # set it to be huge because then if a large but allowed gap occurs it has to
   249  # get filled in, which means instead of a simple 8-byte write to a new file we
   250  # could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice.
   251  # CERES_MAX_SLICE_GAP = 80
   252  
   253  # Enabling this option will cause Ceres to lock each Ceres file it writes to
   254  # with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
   255  # multiple carbon-cache daemons are writing to the same files.
   256  # CERES_LOCK_WRITES = False
   257  
   258  # Set this to True to enable whitelisting and blacklisting of metrics in
   259  # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
   260  # missing or empty, all metrics will pass through
   261  # USE_WHITELIST = False
   262  
   263  # By default, carbon itself will log statistics (such as a count,
   264  # metricsReceived) with the top level prefix of 'carbon' at an interval of 60
   265  # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
   266  # CARBON_METRIC_PREFIX = carbon
   267  CARBON_METRIC_INTERVAL = 10
   268  
   269  # Enable AMQP if you want to receive metrics using an amqp broker
   270  # ENABLE_AMQP = False
   271  
   272  # Verbose means a line will be logged for every metric received
   273  # useful for testing
   274  # AMQP_VERBOSE = False
   275  
   276  # AMQP_HOST = localhost
   277  # AMQP_PORT = 5672
   278  # AMQP_VHOST = /
   279  # AMQP_USER = guest
   280  # AMQP_PASSWORD = guest
   281  # AMQP_EXCHANGE = graphite
   282  # AMQP_METRIC_NAME_IN_BODY = False
   283  
   284  # The manhole interface allows you to SSH into the carbon daemon
   285  # and get a python interpreter. BE CAREFUL WITH THIS! If you do
   286  # something like time.sleep() in the interpreter, the whole process
   287  # will sleep! This is *extremely* helpful in debugging, assuming
   288  # you are familiar with the code. If you are not, please don't
   289  # mess with this, you are asking for trouble :)
   290  #
   291  # ENABLE_MANHOLE = False
   292  # MANHOLE_INTERFACE = 127.0.0.1
   293  # MANHOLE_PORT = 7222
   294  # MANHOLE_USER = admin
   295  # MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=
   296  
   297  # Patterns for all of the metrics this machine will store. Read more at
   298  # http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
   299  #
   300  # Example: store all sales, linux servers, and utilization metrics
   301  # BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
   302  #
   303  # Example: store everything
   304  # BIND_PATTERNS = #
   305  
   306  # URL of graphite-web instance, this is used to add incoming series to the tag database
   307  GRAPHITE_URL = http://127.0.0.1:8080
   308  
   309  # Tag support, when enabled carbon will make HTTP calls to graphite-web to update the tag index
   310  ENABLE_TAGS = True
   311  
   312  # Tag update interval, this specifies how frequently updates to existing series will trigger
   313  # an update to the tag index, the default setting is once every 100 updates
   314  # TAG_UPDATE_INTERVAL = 100
   315  
   316  # Tag hash filenames, this specifies whether tagged metric filenames should use the hash of the metric name
   317  # or a human-readable name, using hashed names avoids issues with path length when using a large number of tags
   318  # TAG_HASH_FILENAMES = True
   319  
   320  # Tag batch size, this specifies the maximum number of series to be sent to graphite-web in a single batch
   321  # TAG_BATCH_SIZE = 100
   322  
   323  # Tag queue size, this specifies the maximum number of series to be queued for sending to graphite-web
   324  # There are separate queues for new series and for updates to existing series
   325  # TAG_QUEUE_SIZE = 10000
   326  
   327  # Set to enable Sentry.io exception monitoring.
   328  # RAVEN_DSN='YOUR_DSN_HERE'.
   329  
   330  # To configure special settings for the carbon-cache instance 'b', uncomment this:
   331  #[cache:b]
   332  #LINE_RECEIVER_PORT = 2103
   333  #PICKLE_RECEIVER_PORT = 2104
   334  #CACHE_QUERY_PORT = 7102
   335  # and any other settings you want to customize, defaults are inherited
   336  # from the [cache] section.
   337  # You can then specify the --instance=b option to manage this instance
   338  #
   339  # In order to turn on logging of successful connections for the line
   340  # receiver, set this to True
   341  LOG_LISTENER_CONN_SUCCESS = False
   342  
   343  [relay]
   344  LINE_RECEIVER_INTERFACE = 0.0.0.0
   345  LINE_RECEIVER_PORT = 2013
   346  PICKLE_RECEIVER_INTERFACE = 0.0.0.0
   347  PICKLE_RECEIVER_PORT = 2014
   348  
   349  # Carbon-relay has several options for metric routing controlled by RELAY_METHOD
   350  #
   351  # Use relay-rules.conf to route metrics to destinations based on pattern rules
   352  #RELAY_METHOD = rules
   353  #
   354  # Use consistent-hashing for even distribution of metrics between destinations
   355  #RELAY_METHOD = consistent-hashing
   356  #
   357  # Use consistent-hashing but take into account an aggregation-rules.conf shared
   358  # by downstream carbon-aggregator daemons. This will ensure that all metrics
   359  # that map to a given aggregation rule are sent to the same carbon-aggregator
   360  # instance.
   361  # Enable this for carbon-relays that send to a group of carbon-aggregators
   362  #RELAY_METHOD = aggregated-consistent-hashing
   363  #
   364  # You can also use fast-hashing and fast-aggregated-hashing which are in O(1)
   365  # and will always redirect the metrics to the same destination but do not try
   366  # to minimize rebalancing when the list of destinations is changing.
   367  RELAY_METHOD = rules
   368  
   369  # If you use consistent-hashing you can add redundancy by replicating every
   370  # datapoint to more than one machine.
   371  REPLICATION_FACTOR = 1
   372  
   373  # For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas
   374  # across distributed hosts. With this setting disabled, it's possible that replicas
   375  # may be sent to different caches on the same host. This has been the default
   376  # behavior since introduction of 'consistent-hashing' relay method.
   377  # Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing
   378  # your metrics across the cluster nodes using a tool like Carbonate.
   379  #DIVERSE_REPLICAS = True
   380  
   381  # This is a list of carbon daemons we will send any relayed or
   382  # generated metrics to. The default provided would send to a single
   383  # carbon-cache instance on the default port. However if you
   384  # use multiple carbon-cache instances then it would look like this:
   385  #
   386  # DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
   387  #
   388  # The general form is IP:PORT:INSTANCE where the :INSTANCE part is
   389  # optional and refers to the "None" instance if omitted.
   390  #
   391  # Note that if the destinations are all carbon-caches then this should
   392  # exactly match the webapp's CARBONLINK_HOSTS setting in terms of
   393  # instances listed (order matters!).
   394  #
   395  # If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
   396  # must be defined in this list
   397  DESTINATIONS = 127.0.0.1:2004
   398  
   399  # This defines the protocol to use to contact the destination. It can be
   400  # set to one of "line", "pickle", "udp" and "protobuf". This list can be
   401  # extended with CarbonClientFactory plugins and defaults to "pickle".
   402  # DESTINATION_PROTOCOL = pickle
   403  
   404  # This defines the wire transport, either none or ssl.
   405  # If SSL is used any TCP connection will be upgraded to TLS1.  The system's
   406  # trust authority will be used unless DESTINATION_SSL_CA is specified in
   407  # which case an alternative certificate authority chain will be used for
   408  # verifying the remote certificate.
   409  # To use SSL you'll need the cryptography, service_identity, and twisted >= 14
   410  # DESTINATION_TRANSPORT = none
   411  # DESTINATION_SSL_CA=/path/to/private-ca.crt
   412  
   413  # This allows to have multiple connections per destinations, this will
   414  # pool all the replicas of a single host in the same queue and distribute
   415  # points across these replicas instead of replicating them.
   416  # The following example will balance the load between :0 and :1.
   417  ## DESTINATIONS = foo:2001:0, foo:2001:1
   418  ## RELAY_METHOD = rules
   419  # Note: this is currently incompatible with USE_RATIO_RESET which gets
   420  # disabled if this option is enabled.
   421  # DESTINATIONS_POOL_REPLICAS = False
   422  
   423  # When using consistent hashing it sometimes makes sense to make
   424  # the ring dynamic when you don't want to lose points when a
   425  # single destination is down. Replication is an answer to that
   426  # but it can be quite expensive.
   427  # DYNAMIC_ROUTER = False
   428  
   429  # Controls the number of connection attempts before marking a
   430  # destination as down. We usually do one connection attempt per
   431  # second.
   432  # DYNAMIC_ROUTER_MAX_RETRIES = 5
   433  
   434  # This is the maximum number of datapoints that can be queued up
   435  # for a single destination. Once this limit is hit, we will
   436  # stop accepting new data if USE_FLOW_CONTROL is True, otherwise
   437  # we will drop any subsequently received datapoints.
   438  MAX_QUEUE_SIZE = 10000
   439  
   440  # This defines the maximum "message size" between carbon daemons.  If
   441  # your queue is large, setting this to a lower number will cause the
   442  # relay to forward smaller discrete chunks of stats, which may prevent
   443  # overloading on the receiving side after a disconnect.
   444  MAX_DATAPOINTS_PER_MESSAGE = 500
   445  
   446  # Limit the number of open connections the receiver can handle at any time.
   447  # Default is no limit. Setting up a limit for sites handling high volume
   448  # traffic may be recommended to avoid running out of TCP memory or having
   449  # thousands of TCP connections reduce the throughput of the service.
   450  #MAX_RECEIVER_CONNECTIONS = inf
   451  
   452  # Specify the user to drop privileges to
   453  # If this is blank carbon-relay runs as the user that invokes it
   454  # USER =
   455  
   456  # This is the percentage that the queue must be empty before it will accept
   457  # more messages.  For a larger site, if the queue is very large it makes sense
   458  # to tune this to allow for incoming stats.  So if you have an average
   459  # flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense
   460  # to allow stats to start flowing when you've cleared the queue to 95% since
   461  # you should have space to accommodate the next minute's worth of stats
   462  # even before the relay incrementally clears more of the queue
   463  QUEUE_LOW_WATERMARK_PCT = 0.8
   464  
   465  # To allow for batch efficiency from the pickle protocol and to benefit from
   466  # other batching advantages, all writes are deferred by putting them into a queue,
   467  # and then the queue is flushed and sent a small fraction of a second later.
   468  TIME_TO_DEFER_SENDING = 0.0001
   469  
   470  # Set this to False to drop datapoints when any send queue (sending datapoints
   471  # to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
   472  # default) then sockets over which metrics are received will temporarily stop accepting
   473  # data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE.
   474  USE_FLOW_CONTROL = True
   475  
   476  # If enabled this setting is used to timeout metric client connection if no
   477  # metrics have been sent in specified time in seconds
   478  #METRIC_CLIENT_IDLE_TIMEOUT = None
   479  
   480  # Set this to True to enable whitelisting and blacklisting of metrics in
   481  # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
   482  # missing or empty, all metrics will pass through
   483  # USE_WHITELIST = False
   484  
   485  # By default, carbon itself will log statistics (such as a count,
   486  # metricsReceived) with the top level prefix of 'carbon' at an interval of 60
   487  # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
   488  # CARBON_METRIC_PREFIX = carbon
   489  CARBON_METRIC_INTERVAL = 20
   490  #
   491  # In order to turn on logging of successful connections for the line
   492  # receiver, set this to True
   493  LOG_LISTENER_CONN_SUCCESS = False
   494  
   495  # If you're connecting from the relay to a destination that's over the
   496  # internet or similarly iffy connection, a backlog can develop because
   497  # of internet weather conditions, e.g. acks getting lost or similar issues.
   498  # To deal with that, you can enable USE_RATIO_RESET which will let you
   499  # re-set the connection to an individual destination.  Defaults to being off.
   500  USE_RATIO_RESET=False
   501  
   502  # When there is a small number of stats flowing, it's not desirable to
   503  # perform any actions based on percentages - it's just too "twitchy".
   504  MIN_RESET_STAT_FLOW=1000
   505  
   506  # When the ratio of stats being sent in a reporting interval is far
   507  # enough from 1.0, we will disconnect the socket and reconnect to
   508  # clear out queued stats.  The default ratio of 0.9 indicates that 10%
   509  # of stats aren't being delivered within one CARBON_METRIC_INTERVAL
   510  # (default of 60 seconds), which can lead to a queue backup.  Under
   511  # some circumstances re-setting the connection can fix this, so
   512  # set this according to your tolerance, and look in the logs for
   513  # "resetConnectionForQualityReasons" to observe whether this is kicking
   514  # in when your sent queue is building up.
   515  MIN_RESET_RATIO=0.9
   516  
   517  # The minimum time between resets.  When a connection is re-set, we
   518  # need to wait before another reset is performed.
   519  # (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed
   520  # before stats for the new connection will be available.  Setting this
   521  # below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of
   522  # reset connections for no good reason.
   523  MIN_RESET_INTERVAL=121
   524  
   525  # Enable TCP Keep Alive (http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html).
   526  # Default settings will send a probe every 30s. Default is False.
   527  # TCP_KEEPALIVE=True
   528  # The interval between the last data packet sent (simple ACKs are not
   529  # considered data) and the first keepalive probe; after the connection is marked
   530  # to need keepalive, this counter is not used any further.
   531  # TCP_KEEPIDLE=10
   532  # The interval between subsequent keepalive probes, regardless of what
   533  # the connection has exchanged in the meantime.
   534  # TCP_KEEPINTVL=30
   535  # The number of unacknowledged probes to send before considering the connection
   536  # dead and notifying the application layer.
   537  # TCP_KEEPCNT=2
   538  
   539  
   540  [aggregator]
   541  LINE_RECEIVER_INTERFACE = 0.0.0.0
   542  LINE_RECEIVER_PORT = 2023
   543  
   544  PICKLE_RECEIVER_INTERFACE = 0.0.0.0
   545  PICKLE_RECEIVER_PORT = 2024
   546  
   547  # If set true, metric received will be forwarded to DESTINATIONS in addition to
   548  # the output of the aggregation rules. If set false the carbon-aggregator will
   549  # only ever send the output of aggregation.
   550  FORWARD_ALL = True
   551  
   552  # Filenames of the configuration files to use for this instance of aggregator.
   553  # Filenames are relative to CONF_DIR.
   554  #
   555  # AGGREGATION_RULES = aggregation-rules.conf
   556  # REWRITE_RULES = rewrite-rules.conf
   557  
   558  # This is a list of carbon daemons we will send any relayed or
   559  # generated metrics to. The default provided would send to a single
   560  # carbon-cache instance on the default port. However if you
   561  # use multiple carbon-cache instances then it would look like this:
   562  #
   563  # DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
   564  #
   565  # The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
   566  # optional and refers to the "None" instance if omitted.
   567  #
   568  # Note that if the destinations are all carbon-caches then this should
   569  # exactly match the webapp's CARBONLINK_HOSTS setting in terms of
   570  # instances listed (order matters!).
   571  DESTINATIONS = 127.0.0.1:2004
   572  
   573  # If you want to add redundancy to your data by replicating every
   574  # datapoint to more than one machine, increase this.
   575  REPLICATION_FACTOR = 1
   576  
   577  # This is the maximum number of datapoints that can be queued up
   578  # for a single destination. Once this limit is hit, we will
   579  # stop accepting new data if USE_FLOW_CONTROL is True, otherwise
   580  # we will drop any subsequently received datapoints.
   581  MAX_QUEUE_SIZE = 10000
   582  
   583  # Set this to False to drop datapoints when any send queue (sending datapoints
   584  # to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
   585  # default) then sockets over which metrics are received will temporarily stop accepting
   586  # data until the send queues fall below 80% MAX_QUEUE_SIZE.
   587  USE_FLOW_CONTROL = True
   588  
   589  # If enabled this setting is used to timeout metric client connection if no
   590  # metrics have been sent in specified time in seconds
   591  #METRIC_CLIENT_IDLE_TIMEOUT = None
   592  
   593  # This defines the maximum "message size" between carbon daemons.
   594  # You shouldn't need to tune this unless you really know what you're doing.
   595  MAX_DATAPOINTS_PER_MESSAGE = 500
   596  
   597  # This defines how many datapoints the aggregator remembers for
   598  # each metric. Aggregation only happens for datapoints that fall in
   599  # the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
   600  MAX_AGGREGATION_INTERVALS = 5
   601  
   602  # Limit the number of open connections the receiver can handle at any time.
   603  # Default is no limit. Setting up a limit for sites handling high volume
   604  # traffic may be recommended to avoid running out of TCP memory or having
   605  # thousands of TCP connections reduce the throughput of the service.
   606  #MAX_RECEIVER_CONNECTIONS = inf
   607  
   608  # By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
   609  # aggregated data points once every rule.frequency seconds, on a per-rule basis.
   610  # Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
   611  # every N seconds, independent of rule frequency. This is useful, for example,
   612  # to be able to query partially aggregated metrics from carbon-cache without
   613  # having to first wait rule.frequency seconds.
   614  # WRITE_BACK_FREQUENCY = 0
   615  
   616  # Set this to True to enable whitelisting and blacklisting of metrics in
   617  # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
   618  # missing or empty, all metrics will pass through
   619  # USE_WHITELIST = False
   620  
   621  # By default, carbon itself will log statistics (such as a count,
   622  # metricsReceived) with the top level prefix of 'carbon' at an interval of 60
   623  # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
   624  # CARBON_METRIC_PREFIX = carbon
   625  CARBON_METRIC_INTERVAL = 20
   626  
   627  # In order to turn off logging of successful connections for the line
   628  # receiver, set this to False
   629  LOG_LISTENER_CONN_SUCCESS = False
   630  
   631  # In order to turn off logging of metrics with no corresponding
   632  # aggregation rules receiver, set this to False
   633  # LOG_AGGREGATOR_MISSES = False
   634  
   635  # Specify the user to drop privileges to
   636  # If this is blank carbon-aggregator runs as the user that invokes it
   637  # USER =
   638  
   639  # Part of the code, and particularly aggregator rules, need
   640  # to cache metric names. To avoid leaking too much memory you
   641  # can tweak the size of this cache. The default allow for 1M
   642  # different metrics per rule (~200MiB).
   643  # CACHE_METRIC_NAMES_MAX=1000000
   644  
   645  # You can optionally set a ttl to this cache.
   646  # CACHE_METRIC_NAMES_TTL=600