github.com/pingcap/ticdc@v0.0.0-20220526033649-485a10ef2652/docker/config/tikv.toml

# TiKV config template
#  Human-readable big numbers:
#   File size (based on bytes): KB, MB, GB, TB, PB
#    e.g.: 1_048_576 = "1MB"
#   Time (based on ms): ms, s, m, h
#    e.g.: 78_000 = "1.3m"
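#  For example (illustrative, using settings that appear later in this file):
#    region-split-size = "96MB"          # a file size
#    pd-heartbeat-tick-interval = "60s"  # a time duration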

# log level: trace, debug, info, warn, error, off.
log-level = "error"
# file to store logs; if empty, logs are written to stderr.
# log-file = ""

[readpool.storage]
# size of the thread pool for high-priority operations
# high-concurrency = 4
# size of the thread pool for normal-priority operations
# normal-concurrency = 4
# size of the thread pool for low-priority operations
# low-concurrency = 4
# max number of running high-priority operations; new ones are rejected if exceeded
# max-tasks-high = 8000
# max number of running normal-priority operations; new ones are rejected if exceeded
# max-tasks-normal = 8000
# max number of running low-priority operations; new ones are rejected if exceeded
# max-tasks-low = 8000
# stack size for each thread in the pool
# stack-size = "10MB"
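# A minimal sizing sketch (illustrative values only, assuming a 16-core host;
# not a tuning recommendation):
#   high-concurrency = 8
#   normal-concurrency = 8
#   low-concurrency = 8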

[readpool.coprocessor]
# Notice: if CPU_NUM > 8, the default thread pool size for coprocessors
# will be set to CPU_NUM * 0.8.

# high-concurrency = 8
# normal-concurrency = 8
# low-concurrency = 8
# max-tasks-high = 16000
# max-tasks-normal = 16000
# max-tasks-low = 16000
# stack-size = "10MB"

[server]
# set listening address.
# addr = "127.0.0.1:20160"
# set the advertised listening address for client communication; if not set, addr is used instead.
# advertise-addr = ""
# notify capacity, 40960 is suitable for about 7000 regions.
# notify-capacity = 40960
# maximum number of messages that can be processed in one tick.
# messages-per-tick = 4096

# compression type for the grpc channel, available values are no, deflate and gzip.
# grpc-compression-type = "no"
# size of the thread pool for the grpc server.
# grpc-concurrency = 4
# The maximum number of concurrent streams/requests on a client connection.
# grpc-concurrent-stream = 1024
# The number of connections to each tikv server used to send raft messages.
# grpc-raft-conn-num = 10
# Amount to read ahead on individual grpc streams.
# grpc-stream-initial-window-size = "2MB"

# How many snapshots can be sent concurrently.
# concurrent-send-snap-limit = 32
# How many snapshots can be received concurrently.
# concurrent-recv-snap-limit = 32

# max count of tasks being handled; new tasks are rejected once this is reached.
# end-point-max-tasks = 2000

# max recursion level allowed when decoding a DAG expression
# end-point-recursion-limit = 1000

# max time to handle a coprocessor request before timing out
# end-point-request-max-handle-duration = "60s"

# the max bytes a snapshot can write to disk per second;
# should be set based on your disk performance
# snap-max-write-bytes-per-sec = "100MB"

# set attributes about this server, e.g. { zone = "us-west-1", disk = "ssd" }.
# labels = {}
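# For example, to advertise the attributes suggested above (illustrative values):
# labels = { zone = "us-west-1", disk = "ssd" }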

[storage]
# set the path to the rocksdb directory.
# data-dir = "/tmp/tikv/store"

# notify capacity of the scheduler's channel
# scheduler-notify-capacity = 10240

# maximum number of messages that can be processed in one tick
# scheduler-messages-per-tick = 1024

# the number of slots in scheduler latches, the concurrency control for writes.
# scheduler-concurrency = 2048000

# the scheduler's worker pool size; increase it for write-heavy workloads,
# but keep it below the total number of cpu cores.
# scheduler-worker-pool-size = 4

# When the pending write bytes exceed this threshold,
# the "scheduler too busy" error is returned.
# scheduler-pending-write-threshold = "100MB"

[pd]
# pd endpoints
# endpoints = []

[metric]
# the Prometheus client push interval. Setting the value to 0s stops the Prometheus client from pushing.
# interval = "15s"
# the Prometheus pushgateway address. Leaving it empty stops the Prometheus client from pushing.
address = "pushgateway:9091"
# the Prometheus client push job name. Note: a node id is automatically appended, e.g., "tikv_1".
# job = "tikv"

[raftstore]
# true (default value) for high reliability; this can prevent data loss on power failure.
# sync-log = true

# set the path to the raftdb directory, default value is data-dir/raft
# raftdb-path = ""

# set the store capacity; if not set, the disk capacity is used.
# capacity = 0

# notify capacity, 40960 is suitable for about 7000 regions.
# notify-capacity = 40960

# maximum number of messages that can be processed in one tick.
# messages-per-tick = 4096

# Region heartbeat tick interval for reporting to pd.
# pd-heartbeat-tick-interval = "60s"
# Store heartbeat tick interval for reporting to pd.
# pd-store-heartbeat-tick-interval = "10s"

# When a region's size change exceeds region-split-check-diff, we check
# whether the region should be split.
# region-split-check-diff = "6MB"

# Interval to check whether a region needs to be split.
# split-region-check-tick-interval = "10s"

# When a raft entry exceeds this max size, the proposal is rejected.
# raft-entry-max-size = "8MB"

# Interval to gc unnecessary raft log.
# raft-log-gc-tick-interval = "10s"
# A threshold for gc of stale raft log; must be >= 1.
# raft-log-gc-threshold = 50
# When the entry count exceeds this value, gc is forcibly triggered.
# raft-log-gc-count-limit = 72000
# When the approximate size of raft log entries exceeds this value, gc is forcibly triggered.
# It's recommended to set it to 3/4 of region-split-size.
# raft-log-gc-size-limit = "72MB"
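# Worked example: with the default region-split-size of "96MB" (see the
# [coprocessor] section below), 96MB * 3/4 = 72MB, which is exactly the
# default raft-log-gc-size-limit above.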

# When a peer hasn't been active for max-peer-down-duration,
# we will consider this peer to be down and report it to pd.
# max-peer-down-duration = "5m"

# Interval to check whether to start a manual compaction for a region.
# region-compact-check-interval = "5m"
# Number of regions to check each time.
# region-compact-check-step = 100
# The minimum number of delete tombstones to trigger a manual compaction.
# region-compact-min-tombstones = 10000
# Interval to check whether to start a manual compaction for the lock column family;
# if the written bytes reach lock-cf-compact-bytes-threshold, a manual
# compaction is fired for the lock column family.
# lock-cf-compact-interval = "10m"
# lock-cf-compact-bytes-threshold = "256MB"

# Interval (in seconds) to check whether a region's data is consistent.
# consistency-check-interval = 0

# Use delete range to drop a large number of continuous keys.
# use-delete-range = false

# delay time before deleting a stale peer
# clean-stale-peer-delay = "10m"

# Interval to clean up imported sst files.
# cleanup-import-sst-interval = "10m"

[coprocessor]
# When true, TiKV tries to split a region at table-prefix boundaries if
# the region crosses tables. It is recommended to turn this option off
# if a large number of tables will be created.
# split-region-on-table = true
# When a region's size exceeds region-max-size, it is split into two regions,
# with the left region's size being region-split-size or slightly smaller.
# region-max-size = "144MB"
# region-split-size = "96MB"
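# Note the ratio between the two defaults: 144MB = 1.5 * 96MB, so a region is
# only split once it has grown half again past the target split size.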

[rocksdb]
# Maximum number of concurrent background jobs (compactions and flushes)
# max-background-jobs = 8

# This value represents the maximum number of threads that will concurrently perform a
# compaction job by breaking it into multiple, smaller ones that are run simultaneously.
# Default: 1 (i.e. no subcompactions)
# max-sub-compactions = 1

# Number of open files that can be used by the DB.  You may need to
# increase this if your database has a large working set. Value -1 means
# files opened are always kept open. You can estimate the number of files based
# on target_file_size_base and target_file_size_multiplier for level-based
# compaction.
# If max-open-files = -1, RocksDB will prefetch index and filter blocks into
# the block cache at startup, so if your database has a large working set, it will
# take several minutes to open the db.
max-open-files = 1024

# Max size of rocksdb's MANIFEST file.
# For a detailed explanation please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST
# max-manifest-file-size = "20MB"

# If true, the database will be created if it is missing.
# create-if-missing = true

# rocksdb wal recovery mode
# 0 : TolerateCorruptedTailRecords, tolerate incomplete records in trailing data on all logs;
# 1 : AbsoluteConsistency, we don't expect to find any corruption in the WAL;
# 2 : PointInTimeRecovery, recover to point-in-time consistency;
# 3 : SkipAnyCorruptedRecords, recovery after a disaster;
# wal-recovery-mode = 2

# rocksdb write-ahead logs dir path
# This specifies the absolute dir path for write-ahead logs (WAL).
# If it is empty, the log files will be in the same dir as data.
# When you set the path to a rocksdb directory in memory, like /dev/shm, you may want to set
# wal-dir to a directory on persistent storage.
# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database
# wal-dir = "/tmp/tikv/store"

# The following two fields affect how archived write-ahead logs will be deleted.
# 1. If both are set to 0, logs will be deleted asap and will not get into the archive.
# 2. If wal-ttl-seconds is 0 and wal-size-limit is not 0,
#    WAL files will be checked every 10 min and if the total size is greater
#    than wal-size-limit, they will be deleted starting with the
#    earliest until the size limit is met. All empty files will be deleted.
# 3. If wal-ttl-seconds is not 0 and wal-size-limit is 0, then
#    WAL files will be checked every wal-ttl-seconds / 2 and those that
#    are older than wal-ttl-seconds will be deleted.
# 4. If both are not 0, WAL files will be checked every 10 min and both
#    checks will be performed, with ttl being first.
# When you set the path to a rocksdb directory in memory, like /dev/shm, you may want to set
# wal-ttl-seconds to a value greater than 0 (like 86400) and back up your db on a regular basis.
# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database
# wal-ttl-seconds = 0
# wal-size-limit = 0
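# For example, for an in-memory data dir as described above (illustrative values;
# the wal-dir path is hypothetical):
#   wal-dir = "/data/tikv/wal"    # a directory on persistent storage
#   wal-ttl-seconds = 86400       # keep archived WALs for one day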

# rocksdb max total wal size
# max-total-wal-size = "4GB"

# Rocksdb statistics provide cumulative stats over time.
# Turning statistics on introduces about 5%-10% overhead for RocksDB,
# but it is worth it to know the internal status of RocksDB.
# enable-statistics = true

# Dump statistics periodically in information logs.
# Same as rocksdb's default value (10 min).
# stats-dump-period = "10m"

# Per the RocksDB FAQ (https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ),
# if you want to use rocksdb on multiple disks or spinning disks, you should set this
# value to at least 2MB.
# compaction-readahead-size = 0

# This is the maximum buffer size that is used by WritableFileWriter.
# writable-file-max-buffer-size = "1MB"

# Use O_DIRECT for both reads and writes in background flush and compactions
# use-direct-io-for-flush-and-compaction = false

# Limit the disk IO of compaction and flush. Compaction and flush can cause
# terrible spikes if they exceed a certain threshold. Consider setting this to
# 50% ~ 80% of the disk throughput for a more stable result. However, in heavy
# write workloads, limiting compaction and flush speed can cause write stalls too.
# rate-bytes-per-sec = 0

# Enable or disable the pipelined write
# enable-pipelined-write = true

# Allows the OS to incrementally sync files to disk while they are being
# written, asynchronously, in the background.
# bytes-per-sync = "0MB"

# Allows the OS to incrementally sync the WAL to disk while it is being written.
# wal-bytes-per-sync = "0KB"

# Specify the maximal size of the Rocksdb info log file. If the log file
# is larger than `max_log_file_size`, a new info log file will be created.
# If max_log_file_size == 0, all logs will be written to one log file.
# Default: 1GB
# info-log-max-size = "1GB"

# Time for the Rocksdb info log file to roll (in seconds).
# If specified with a non-zero value, the log file will be rolled
# if it has been active longer than `log_file_time_to_roll`.
# Default: 0 (disabled)
# info-log-roll-time = "0"

# Maximum number of Rocksdb info log files to be kept.
# Default: 10
# info-log-keep-log-file-num = 10

# This specifies the Rocksdb info LOG dir.
# If it is empty, the log files will be in the same dir as data.
# If it is non-empty, the log files will be in the specified dir,
# and the db data dir's absolute path will be used as the log file
# name's prefix.
# Default: empty
# info-log-dir = ""

# Column family default, used to store the actual data of the database.
[rocksdb.defaultcf]
# The compression method (if any) used to compress a block.
#   no:     kNoCompression
#   snappy: kSnappyCompression
#   zlib:   kZlibCompression
#   bzip2:  kBZip2Compression
#   lz4:    kLZ4Compression
#   lz4hc:  kLZ4HCCompression
#   zstd:   kZSTD

# per-level compression
# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
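# The seven entries map to levels L0 through L6 in order, so the default above
# leaves L0-L1 uncompressed, uses lz4 for L2-L4, and zstd for L5-L6.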

# Approximate size of user data packed per block.  Note that the
# block size specified here corresponds to uncompressed data.
# block-size = "64KB"

# If you're doing point lookups you definitely want to turn bloom filters on. We use
# bloom filters to avoid unnecessary disk reads. The default bits_per_key is 10, which
# yields an ~1% false positive rate. Larger bits_per_key values reduce the false positive
# rate, but increase memory usage and space amplification.
# bloom-filter-bits-per-key = 10
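# Rough memory arithmetic (illustrative): at 10 bits per key, 100 million keys
# cost about 10 * 100_000_000 bits = 1_000_000_000 bits, i.e. roughly 125MB of
# filter space.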

# false means one bloom filter per sst file; true means every block has a corresponding bloom filter
# block-based-bloom-filter = false

# level0-file-num-compaction-trigger = 4

# Soft limit on the number of level-0 files. We start slowing down writes at this point.
# level0-slowdown-writes-trigger = 20

# Maximum number of level-0 files.  We stop writes at this point.
# level0-stop-writes-trigger = 36

# Amount of data to build up in memory (backed by an unsorted log
# on disk) before converting to a sorted on-disk file.
# write-buffer-size = "128MB"

# The maximum number of write buffers that are built up in memory.
# max-write-buffer-number = 5

# The minimum number of write buffers that will be merged together
# before writing to storage.
# min-write-buffer-number-to-merge = 1

# Controls the maximum total data size for the base level (level 1).
# max-bytes-for-level-base = "512MB"

# Target file size for compaction.
# target-file-size-base = "8MB"

# Max bytes for compaction (RocksDB's max_compaction_bytes).
# max-compaction-bytes = "2GB"

# There are four different algorithms to pick files to compact.
# 0 : ByCompensatedSize
# 1 : OldestLargestSeqFirst
# 2 : OldestSmallestSeqFirst
# 3 : MinOverlappingRatio
# compaction-pri = 3

# The block cache is used to cache uncompressed blocks; a big block cache can speed up reads.
# In normal cases it should be tuned to 30%-50% of the system's total memory.
# block-cache-size = "1GB"
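# For example (illustrative, assuming a dedicated host with 32GB of RAM):
#   block-cache-size = "12GB"   # roughly 40% of total memory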

# Indicates whether to put index/filter blocks into the block cache.
# If not specified, each "table reader" object will pre-load the index/filter block
# during table initialization.
# cache-index-and-filter-blocks = true

# Pin level0 filter and index blocks in the cache.
# pin-l0-filter-and-index-blocks = true

# Enable read amplification statistics.
# value  =>  memory usage (percentage of loaded blocks memory)
# 1      =>  12.50 %
# 2      =>  06.25 %
# 4      =>  03.12 %
# 8      =>  01.56 %
# 16     =>  00.78 %
# read-amp-bytes-per-bit = 0

# Pick the target size of each level dynamically.
# dynamic-level-bytes = true

# Options for the write column family, which stores commit information in the MVCC model.
[rocksdb.writecf]
# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
# block-size = "64KB"
# write-buffer-size = "128MB"
# max-write-buffer-number = 5
# min-write-buffer-number-to-merge = 1
# max-bytes-for-level-base = "512MB"
# target-file-size-base = "8MB"

# in normal cases should be tuned to 10%-30% of the system's total memory.
# block-cache-size = "256MB"
# level0-file-num-compaction-trigger = 4
# level0-slowdown-writes-trigger = 20
# level0-stop-writes-trigger = 36
# cache-index-and-filter-blocks = true
# pin-l0-filter-and-index-blocks = true
# compaction-pri = 3
# read-amp-bytes-per-bit = 0
# dynamic-level-bytes = true

[rocksdb.lockcf]
# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"]
# block-size = "16KB"
# write-buffer-size = "128MB"
# max-write-buffer-number = 5
# min-write-buffer-number-to-merge = 1
# max-bytes-for-level-base = "128MB"
# target-file-size-base = "8MB"
# block-cache-size = "256MB"
# level0-file-num-compaction-trigger = 1
# level0-slowdown-writes-trigger = 20
# level0-stop-writes-trigger = 36
# cache-index-and-filter-blocks = true
# pin-l0-filter-and-index-blocks = true
# compaction-pri = 0
# read-amp-bytes-per-bit = 0
# dynamic-level-bytes = true

[raftdb]
# max-sub-compactions = 1
max-open-files = 1024
# max-manifest-file-size = "20MB"
# create-if-missing = true

# enable-statistics = true
# stats-dump-period = "10m"

# compaction-readahead-size = 0
# writable-file-max-buffer-size = "1MB"
# use-direct-io-for-flush-and-compaction = false
# enable-pipelined-write = true
# allow-concurrent-memtable-write = false
# bytes-per-sync = "0MB"
# wal-bytes-per-sync = "0KB"

# info-log-max-size = "1GB"
# info-log-roll-time = "0"
# info-log-keep-log-file-num = 10
# info-log-dir = ""

[raftdb.defaultcf]
# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
# block-size = "64KB"
# write-buffer-size = "128MB"
# max-write-buffer-number = 5
# min-write-buffer-number-to-merge = 1
# max-bytes-for-level-base = "512MB"
# target-file-size-base = "8MB"

# should be tuned to 256MB~2GB.
# block-cache-size = "256MB"
# level0-file-num-compaction-trigger = 4
# level0-slowdown-writes-trigger = 20
# level0-stop-writes-trigger = 36
# cache-index-and-filter-blocks = true
# pin-l0-filter-and-index-blocks = true
# compaction-pri = 0
# read-amp-bytes-per-bit = 0
# dynamic-level-bytes = true

[security]
# set the paths for certificates. An empty string means secure connections are disabled.
# ca-path = ""
# cert-path = ""
# key-path = ""
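# A minimal TLS sketch (hypothetical paths, for illustration only; all three
# settings need to point at real files):
#   ca-path = "/etc/tikv/ca.pem"
#   cert-path = "/etc/tikv/server.pem"
#   key-path = "/etc/tikv/server-key.pem"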

[import]
# the directory to store the kv data being imported.
# import-dir = "/tmp/tikv/import"
# number of threads to handle RPC requests.
# num-threads = 8
# stream channel window size; the stream is blocked when the channel is full.
# stream-channel-window = 128