github.com/1aal/kubeblocks@v0.0.0-20231107070852-e1c03e598921/deploy/kafka/configs/kafka-server-constraint.cue

//Copyright (C) 2022-2023 ApeCloud Co., Ltd
//
//This file is part of KubeBlocks project
//
//This program is free software: you can redistribute it and/or modify
//it under the terms of the GNU Affero General Public License as published by
//the Free Software Foundation, either version 3 of the License, or
//(at your option) any later version.
//
//This program is distributed in the hope that it will be useful
//but WITHOUT ANY WARRANTY; without even the implied warranty of
//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//GNU Affero General Public License for more details.
//
//You should have received a copy of the GNU Affero General Public License
//along with this program.  If not, see <http://www.gnu.org/licenses/>.

// https://kafka.apache.org/documentation/#brokerconfigs
#KafkaParameter: {

	"allow.everyone.if.no.acl.found"?: bool

	// The replication factor for the offsets topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement.
	"offsets.topic.replication.factor"?: int & >=1 & <=32767

	// The replication factor for the transaction topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement.
	"transaction.state.log.replication.factor"?: int & >=1 & <=32767

	// The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used
	"log.flush.interval.ms"?: int

	// The number of messages accumulated on a log partition before messages are flushed to disk
	"log.flush.interval.messages"?: int & >=1

	// Overridden min.insync.replicas config for the transaction topic.
	"transaction.state.log.min.isr"?: int & >=1

	// Enables topic deletion. Deleting a topic through the admin tool will have no effect if this config is turned off
	"delete.topic.enable"?: bool

	// The largest record batch size allowed by Kafka
	"message.max.bytes"?: int & >=0

	// The number of threads that the server uses for receiving requests from the network and sending responses to the network
	"num.network.threads"?: int & >=1

	// The number of threads that the server uses for processing requests, which may include disk I/O
	"num.io.threads"?: int & >=1

	// The number of threads that can move replicas between log directories, which may include disk I/O
	"num.replica.alter.log.dirs.threads"?: int

	// The number of threads to use for various background processing tasks
	"background.threads"?: int & >=1

	// The number of queued requests allowed for data-plane, before blocking the network threads
	"queued.max.requests"?: int & >=1

	// The number of queued bytes allowed before no more requests are read
	"queued.max.request.bytes"?: int

	// The configuration controls the maximum amount of time the client will wait for the response of a request
	"request.timeout.ms"?: int & >=0

	// The amount of time the client will wait for the socket connection to be established. If the connection is not built before the timeout elapses, clients will close the socket channel.
	"socket.connection.setup.timeout.ms"?: int

	// The maximum amount of time the client will wait for the socket connection to be established.
	"socket.connection.setup.timeout.max.ms"?: int

	// This is the maximum number of bytes in the log between the latest snapshot and the high-watermark needed before generating a new snapshot.
	"metadata.log.max.record.bytes.between.snapshots"?: int & >=1

	// The length of time in milliseconds between broker heartbeats. Used when running in KRaft mode.
	"broker.heartbeat.interval.ms"?: int

	// The length of time in milliseconds that a broker lease lasts if no heartbeats are made. Used when running in KRaft mode.
	"broker.session.timeout.ms"?: int

	// SASL mechanism used for communication with controllers. Default is GSSAPI.
	"sasl.mechanism.controller.protocol"?: string

	// The maximum size of a single metadata log file.
	"metadata.log.segment.bytes"?: int & >=12

	// The maximum time before a new metadata log file is rolled out (in milliseconds).
	"metadata.log.segment.ms"?: int

	// The maximum combined size of the metadata log and snapshots before deleting old snapshots and log files.
	"metadata.max.retention.bytes"?: int

	// The number of milliseconds to keep a metadata log file or snapshot before deleting it. Since at least one snapshot must exist before any logs can be deleted, this is a soft limit.
	"metadata.max.retention.ms"?: int

	// This configuration controls how often the active controller should write no-op records to the metadata partition.
	// If the value is 0, no-op records are not appended to the metadata partition. The default value is 500
	"metadata.max.idle.interval.ms"?: int & >=0

	// The fully qualified name of a class that implements org.apache.kafka.server.authorizer.Authorizer interface, which is used by the broker for authorization.
	"authorizer.class.name"?: string

	// A comma-separated list of listener names which may be started before the authorizer has finished initialization.
	"early.start.listeners"?: string

	// Name of listener used for communication between controller and brokers.
	"control.plane.listener.name"?: string

	// The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.
	"socket.send.buffer.bytes"?: int

	// The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.
	"socket.receive.buffer.bytes"?: int

	// The maximum number of bytes in a socket request
	"socket.request.max.bytes"?: int & >=1

	// The maximum number of pending connections on the socket.
	// In Linux, you may also need to configure the `somaxconn` and `tcp_max_syn_backlog` kernel parameters accordingly for the configuration to take effect.
	"socket.listen.backlog.size"?: int & >=1

	// The maximum number of connections we allow from each ip address.
	"max.connections.per.ip"?: int & >=0

	// A comma-separated list of per-ip or hostname overrides to the default maximum number of connections. An example value is "hostName:100,127.0.0.1:200"
	"max.connections.per.ip.overrides"?: string

	// The maximum number of connections we allow in the broker at any time.
	"max.connections"?: int & >=0

	// The maximum connection creation rate we allow in the broker at any time.
	"max.connection.creation.rate"?: int & >=0

	// Close idle connections after the number of milliseconds specified by this config.
	"connections.max.idle.ms"?: int

	// Connection close delay on failed authentication: this is the time (in milliseconds) by which connection close will be delayed on authentication failure.
	// This must be configured to be less than connections.max.idle.ms to prevent connection timeout.
	"connection.failed.authentication.delay.ms"?: int & >=0

	// Rack of the broker. This will be used in rack aware replication assignment for fault tolerance.
	"broker.rack"?: string

	// The default number of log partitions per topic
	"num.partitions"?: int & >=1

	// The maximum size of a single log file
	"log.segment.bytes"?: int & >=14

	// The maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in log.roll.hours is used
	"log.roll.ms"?: int

	// The maximum time before a new log segment is rolled out (in hours), secondary to log.roll.ms property
	"log.roll.hours"?: int & >=1

	// The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used
	"log.roll.jitter.ms"?: int

	// The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to log.roll.jitter.ms property
	"log.roll.jitter.hours"?: int & >=0

	// The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
	"log.retention.ms"?: int

	// The number of minutes to keep a log file before deleting it (in minutes), secondary to log.retention.ms property. If not set, the value in log.retention.hours is used
	"log.retention.minutes"?: int

	// The number of hours to keep a log file before deleting it (in hours), tertiary to log.retention.ms property
	"log.retention.hours"?: int

	// The maximum size of the log before deleting it
	"log.retention.bytes"?: int

	// The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion
	"log.retention.check.interval.ms"?: int & >=1

	// The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies.
	"log.cleanup.policy"?: string & "compact" | "delete"

	// The number of background threads to use for log cleaning
	"log.cleaner.threads"?: int & >=0

	// The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average
	"log.cleaner.io.max.bytes.per.second"?: number

	// The total memory used for log deduplication across all cleaner threads
	"log.cleaner.dedupe.buffer.size"?: int

	// The total memory used for log cleaner I/O buffers across all cleaner threads
	"log.cleaner.io.buffer.size"?: int & >=0

	// Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value will allow more log to be cleaned at once but will lead to more hash collisions
	"log.cleaner.io.buffer.load.factor"?: number

	// The amount of time to sleep when there are no logs to clean
	"log.cleaner.backoff.ms"?: int & >=0

	// The minimum ratio of dirty log to total log for a log to be eligible for cleaning.
	"log.cleaner.min.cleanable.ratio"?: number & >=0 & <=1

	// Enable the log cleaner process to run on the server.
	"log.cleaner.enable"?: bool

	// The amount of time to retain delete tombstone markers for log compacted topics.
	"log.cleaner.delete.retention.ms"?: int & >=0

	// The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
	"log.cleaner.min.compaction.lag.ms"?: int & >=0

	// The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
	"log.cleaner.max.compaction.lag.ms"?: int & >=1

	// The maximum size in bytes of the offset index
	"log.index.size.max.bytes"?: int & >=4

	// The interval with which we add an entry to the offset index
	"log.index.interval.bytes"?: int & >=0

	// The amount of time to wait before deleting a file from the filesystem
	"log.segment.delete.delay.ms"?: int & >=0

	// The frequency in ms that the log flusher checks whether any log needs to be flushed to disk
	"log.flush.scheduler.interval.ms"?: int

	// The frequency with which we update the persistent record of the last flush which acts as the log recovery point
	"log.flush.offset.checkpoint.interval.ms"?: int & >=0

	// The frequency with which we update the persistent record of log start offset
	"log.flush.start.offset.checkpoint.interval.ms"?: int & >=0

	// Should the file be pre-allocated when creating a new segment? If you are using Kafka on Windows, you probably need to set it to true.
	"log.preallocate"?: bool

	// The number of threads per data directory to be used for log recovery at startup and flushing at shutdown
	"num.recovery.threads.per.data.dir"?: int & >=1

	// Enable auto creation of topic on the server
	"auto.create.topics.enable"?: bool

	// When a producer sets acks to "all" (or "-1"), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
	// If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
	"min.insync.replicas"?: int & >=1

	// Specify the message format version the broker will use to append messages to the logs.
	"log.message.format.version"?: string & "0.8.0" | "0.8.1" | "0.8.2" | "0.9.0" | "0.10.0-IV0" | "0.10.0-IV1" | "0.10.1-IV0" | "0.10.1-IV1" | "0.10.1-IV2" | "0.10.2-IV0" | "0.11.0-IV0" | "0.11.0-IV1" | "0.11.0-IV2" | "1.0-IV0" | "1.1-IV0" | "2.0-IV0" | "2.0-IV1" | "2.1-IV0" | "2.1-IV1" | "2.1-IV2" | "2.2-IV0" | "2.2-IV1" | "2.3-IV0" | "2.3-IV1" | "2.4-IV0" | "2.4-IV1" | "2.5-IV0" | "2.6-IV0" | "2.7-IV0" | "2.7-IV1" | "2.7-IV2" | "2.8-IV0" | "2.8-IV1" | "3.0-IV0" | "3.0-IV1" | "3.1-IV0" | "3.2-IV0" | "3.3-IV0" | "3.3-IV1" | "3.3-IV2" | "3.3-IV3" | "3.4-IV0"

	// Define whether the timestamp in the message is message create time or log append time.
	"log.message.timestamp.type"?: string & "CreateTime" | "LogAppendTime"

	// The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
	"log.message.timestamp.difference.max.ms"?: int & >=0

	// The create topic policy class that should be used for validation.
	"create.topic.policy.class.name"?: string

	// The alter configs policy class that should be used for validation.
	"alter.config.policy.class.name"?: string

	// This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
	"log.message.downconversion.enable"?: bool

	// The socket timeout for controller-to-broker channels
	"controller.socket.timeout.ms"?: int

	// The default replication factors for automatically created topics
	"default.replication.factor"?: int

	// If a follower hasn't sent any fetch requests or hasn't consumed up to the leader's log end offset for at least this time, the leader will remove the follower from the ISR
	"replica.lag.time.max.ms"?: int

	// The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms
	"replica.socket.timeout.ms"?: int

	// The socket receive buffer for network requests
	"replica.socket.receive.buffer.bytes"?: int

	// The number of bytes of messages to attempt to fetch for each partition.
	"replica.fetch.max.bytes"?: int & >=0

	// The maximum wait time for each fetcher request issued by follower replicas.
	"replica.fetch.wait.max.ms"?: int

	// The amount of time to sleep when fetch partition error occurs.
	"replica.fetch.backoff.ms"?: int & >=0

	// Minimum bytes expected for each fetch response. If not enough bytes, wait up to replica.fetch.wait.max.ms (broker config).
	"replica.fetch.min.bytes"?: int

	// Maximum bytes expected for the entire fetch response.
	"replica.fetch.response.max.bytes"?: int & >=0

	// Number of fetcher threads used to replicate records from each source broker.
	"num.replica.fetchers"?: int

	// The frequency with which the high watermark is saved out to disk
	"replica.high.watermark.checkpoint.interval.ms"?: int

	// The purge interval (in number of requests) of the fetch request purgatory
	"fetch.purgatory.purge.interval.requests"?: int

	// The purge interval (in number of requests) of the producer request purgatory
	"producer.purgatory.purge.interval.requests"?: int

	// The purge interval (in number of requests) of the delete records request purgatory
	"delete.records.purgatory.purge.interval.requests"?: int

	// Enables auto leader balancing.
	"auto.leader.rebalance.enable"?: bool

	// The ratio of leader imbalance allowed per broker.
	"leader.imbalance.per.broker.percentage"?: int

	// The frequency with which the partition rebalance check is triggered by the controller
	"leader.imbalance.check.interval.seconds"?: int & >=1

	// Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss
	"unclean.leader.election.enable"?: bool

	// Security protocol used to communicate between brokers.
	"security.inter.broker.protocol"?: string & "PLAINTEXT" | "SSL" | "SASL_PLAINTEXT" | "SASL_SSL"

	// The fully qualified class name that implements ReplicaSelector.
	"replica.selector.class"?: string

	// Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens
	"controlled.shutdown.max.retries"?: int

	// Before each retry, the system needs time to recover from the state that caused the previous failure (Controller fail over, replica lag etc)
	"controlled.shutdown.retry.backoff.ms"?: int

	// Enable controlled shutdown of the server
	"controlled.shutdown.enable"?: bool

	// The minimum allowed session timeout for registered consumers.
	"group.min.session.timeout.ms"?: int

	// The maximum allowed session timeout for registered consumers.
	"group.max.session.timeout.ms"?: int

	// The amount of time the group coordinator will wait for more consumers to join a new group before performing the first rebalance.
	"group.initial.rebalance.delay.ms"?: int

	// The maximum number of consumers that a single consumer group can accommodate.
	"group.max.size"?: int & >=1

	// The maximum size for a metadata entry associated with an offset commit
	"offset.metadata.max.bytes"?: int

	// Batch size for reading from the offsets segments when loading offsets into the cache (soft-limit, overridden if records are too large).
	"offsets.load.buffer.size"?: int & >=1

	// The number of partitions for the offset commit topic (should not change after deployment)
	"offsets.topic.num.partitions"?: int & >=1

	// The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads
	"offsets.topic.segment.bytes"?: int & >=1

	// Compression codec for the offsets topic - compression may be used to achieve "atomic" commits
	"offsets.topic.compression.codec"?: int

	// For subscribed consumers, committed offset of a specific partition will be expired and discarded when 1) this retention
	// period has elapsed after the consumer group loses all its consumers (i.e. becomes empty); 2) this retention period has elapsed
	// since the last time an offset is committed for the partition and the group is no longer subscribed to the corresponding topic.
	"offsets.retention.minutes"?: int & >=1

	// Frequency at which to check for stale offsets
	"offsets.retention.check.interval.ms"?: int & >=1

	// Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. This is similar to the producer request timeout.
	"offsets.commit.timeout.ms"?: int & >=1

	// The required acks before the commit can be accepted. In general, the default (-1) should not be overridden
	"offsets.commit.required.acks"?: int

	// Specify the final compression type for a given topic.
	"compression.type"?: string & "uncompressed" | "zstd" | "lz4" | "snappy" | "gzip" | "producer"

	// The time in ms that the transaction coordinator will wait without receiving any transaction status updates for the current transaction before expiring its transactional id.
	"transactional.id.expiration.ms"?: int & >=1

	// The maximum allowed timeout for transactions.
	"transaction.max.timeout.ms"?: int & >=1

	// Batch size for reading from the transaction log segments when loading producer ids and transactions into the cache (soft-limit, overridden if records are too large).
	"transaction.state.log.load.buffer.size"?: int & >=1

	// The number of partitions for the transaction topic (should not change after deployment).
	"transaction.state.log.num.partitions"?: int & >=1

	// The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads
	"transaction.state.log.segment.bytes"?: int & >=1

	// The interval at which to rollback transactions that have timed out
	"transaction.abort.timed.out.transaction.cleanup.interval.ms"?: int & >=1

	// The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing
	"transaction.remove.expired.transaction.cleanup.interval.ms"?: int & >=1

	// The time in ms that a topic partition leader will wait before expiring producer IDs.
	// "producer.id.expiration.ms"?: int & >=1

	// The maximum number of incremental fetch sessions that we will maintain.
	"max.incremental.fetch.session.cache.slots"?: int & >=0

	// The maximum amount of data the server should return for a fetch request.
	"fetch.max.bytes"?: int & >=0

	// The number of samples maintained to compute metrics.
	"metrics.num.samples"?: int & >=1

	// The window of time a metrics sample is computed over.
	"metrics.sample.window.ms"?: int & >=0

	// The highest recording level for metrics.
	"metrics.recording.level"?: string & "INFO" | "DEBUG" | "TRACE"

	// A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.
	"metric.reporters"?: string

	// A list of classes to use as Yammer metrics custom reporters.
	"kafka.metrics.reporters"?: string

	// The metrics polling interval (in seconds) which can be used in kafka.metrics.reporters implementations.
	"kafka.metrics.polling.interval.secs"?: int & >=1

	// The number of samples to retain in memory for client quotas
	"quota.window.num"?: int & >=1

	// The number of samples to retain in memory for replication quotas
	"replication.quota.window.num"?: int & >=1

	// The number of samples to retain in memory for alter log dirs replication quotas
	"alter.log.dirs.replication.quota.window.num"?: int & >=1

	// The number of samples to retain in memory for controller mutation quotas
	"controller.quota.window.num"?: int & >=1

	// The time span of each sample for client quotas
	"quota.window.size.seconds"?: int & >=1

	// The time span of each sample for replication quotas
	"replication.quota.window.size.seconds"?: int & >=1

	// The time span of each sample for alter log dirs replication quotas
	"alter.log.dirs.replication.quota.window.size.seconds"?: int & >=1

	// The time span of each sample for controller mutations quotas
	"controller.quota.window.size.seconds"?: int & >=1

	// The fully qualified name of a class that implements the ClientQuotaCallback interface, which is used to determine quota limits applied to client requests.
	"client.quota.callback.class"?: string

	// When explicitly set to a positive number (the default is 0, not a positive number), a session lifetime that will not exceed the configured value will be communicated to v2.2.0 or later clients when they authenticate.
	"connections.max.reauth.ms"?: int

	// The maximum receive size allowed before and during initial SASL authentication.
	"sasl.server.max.receive.size"?: int

	// A list of configurable creator classes each returning a provider implementing security algorithms.
	"security.providers"?: string

	// The SSL protocol used to generate the SSLContext.
	"ssl.protocol"?: string & "TLSv1.2" | "TLSv1.3" | "TLS" | "TLSv1.1" | "SSL" | "SSLv2" | "SSLv3"

	// The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.
	"ssl.provider"?: string

	// The list of protocols enabled for SSL connections.
	"ssl.enabled.protocols"?: string

	// The file format of the key store file. This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].
	"ssl.keystore.type"?: string

	// The location of the key store file. This is optional for client and can be used for two-way authentication for client.
	"ssl.keystore.location"?: string

	// The store password for the key store file. This is optional for client and only needed if 'ssl.keystore.location' is configured. Key store password is not supported for PEM format.
	"ssl.keystore.password"?: string

	// The password of the private key in the key store file or the PEM key specified in 'ssl.keystore.key'.
	"ssl.key.password"?: string

	// Private key in the format specified by 'ssl.keystore.type'.
	"ssl.keystore.key"?: string

	// Certificate chain in the format specified by 'ssl.keystore.type'. Default SSL engine factory supports only PEM format with a list of X.509 certificates
	"ssl.keystore.certificate.chain"?: string

	// The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].
	"ssl.truststore.type"?: string

	// The location of the trust store file.
	"ssl.truststore.location"?: string

	// The password for the trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. Trust store password is not supported for PEM format.
	"ssl.truststore.password"?: string

	// Trusted certificates in the format specified by 'ssl.truststore.type'. Default SSL engine factory supports only PEM format with X.509 certificates.
	"ssl.truststore.certificates"?: string

	// The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.
	"ssl.keymanager.algorithm"?: string

	// The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.
	"ssl.trustmanager.algorithm"?: string

	// The endpoint identification algorithm to validate server hostname using server certificate.
	"ssl.endpoint.identification.algorithm"?: string

	// The SecureRandom PRNG implementation to use for SSL cryptography operations.
	"ssl.secure.random.implementation"?: string

	// Configures kafka broker to request client authentication.
	"ssl.client.auth"?: string & "required" | "requested" | "none"

	// A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithms used to negotiate the security settings for a network connection.
	"ssl.cipher.suites"?: string

	// A list of rules for mapping from distinguished name from the client certificate to short name.
	"ssl.principal.mapping.rules"?: string

	// The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory
	"ssl.engine.factory.class"?: string

	// SASL mechanism used for inter-broker communication. Default is GSSAPI.
	"sasl.mechanism.inter.broker.protocol"?: string

	// JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
	"sasl.jaas.config"?: string

	// The list of SASL mechanisms enabled in the Kafka server. The list may contain any mechanism for which a security provider is available. Only GSSAPI is enabled by default.
	"sasl.enabled.mechanisms"?: string

	// The fully qualified name of a SASL server callback handler class that implements the AuthenticateCallbackHandler interface.
	"sasl.server.callback.handler.class"?: string

	// The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.
	"sasl.client.callback.handler.class"?: string

	// The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin
	"sasl.login.class"?: string

	// The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface.
	"sasl.login.callback.handler.class"?: string

	// The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.
	"sasl.kerberos.service.name"?: string

	// Kerberos kinit command path.
	"sasl.kerberos.kinit.cmd"?: string

	// Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.
	"sasl.kerberos.ticket.renew.window.factor"?: number

	// Percentage of random jitter added to the renewal time.
	"sasl.kerberos.ticket.renew.jitter"?: number

	// Login thread sleep time between refresh attempts.
	"sasl.kerberos.min.time.before.relogin"?: int

	// A list of rules for mapping from principal names to short names (typically operating system usernames).
	"sasl.kerberos.principal.to.local.rules"?: string

	// Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.
	"sasl.login.refresh.window.factor"?: number & >=0.5 & <=1.0

	// The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time.
	"sasl.login.refresh.window.jitter"?: number & >=0.0 & <=0.25

	// The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds.
	"sasl.login.refresh.min.period.seconds"?: int & >=0 & <=900

	// The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds.
	"sasl.login.refresh.buffer.seconds"?: int & >=0 & <=3600

	// The (optional) value in milliseconds for the external authentication provider connection timeout. Currently applies only to OAUTHBEARER.
	"sasl.login.connect.timeout.ms"?: int

	// The (optional) value in milliseconds for the external authentication provider read timeout. Currently applies only to OAUTHBEARER.
	"sasl.login.read.timeout.ms"?: int

	// The (optional) value in milliseconds for the maximum wait between login attempts to the external authentication provider.
	"sasl.login.retry.backoff.max.ms"?: int

	// The (optional) value in milliseconds for the initial wait between login attempts to the external authentication provider.
	"sasl.login.retry.backoff.ms"?: int

	// The OAuth claim for the scope is often named "scope", but this (optional) setting can provide a different name to use for the scope included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.
	"sasl.oauthbearer.scope.claim.name"?: string

	// The OAuth claim for the subject is often named "sub", but this (optional) setting can provide a different name to use for the subject included in the JWT payload's claims if the OAuth/OIDC provider uses a different name for that claim.
	"sasl.oauthbearer.sub.claim.name"?: string

	// The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token endpoint URL to which requests will be made to login based on the configuration in sasl.jaas.config.
	"sasl.oauthbearer.token.endpoint.url"?: string

	// The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved.
	"sasl.oauthbearer.jwks.endpoint.url"?: string

	// The (optional) value in milliseconds for the broker to wait between refreshing its JWKS (JSON Web Key Set) cache that contains the keys to verify the signature of the JWT.
	"sasl.oauthbearer.jwks.endpoint.refresh.ms"?: int

	// The (optional) value in milliseconds for the initial wait between JWKS (JSON Web Key Set) retrieval attempts from the external authentication provider.
	"sasl.oauthbearer.jwks.endpoint.retry.backoff.ms"?: int

	// The (optional) value in milliseconds for the maximum wait between attempts to retrieve the JWKS (JSON Web Key Set) from the external authentication provider.
	"sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms"?: int

	// The (optional) value in seconds to allow for differences between the time of the OAuth/OIDC identity provider and the broker.
	"sasl.oauthbearer.clock.skew.seconds"?: int

	// The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
	"sasl.oauthbearer.expected.audience"?: string

	// The (optional) setting for the broker to use to verify that the JWT was created by the expected issuer.
	"sasl.oauthbearer.expected.issuer"?: string

	// Secret key to generate and verify delegation tokens. The same key must be configured across all the brokers. If the key is not set or set to empty string, brokers will disable the delegation token support.
	"delegation.token.secret.key"?: string

	// The token has a maximum lifetime beyond which it cannot be renewed anymore. Default value 7 days.
	"delegation.token.max.lifetime.ms"?: int & >=1

	// The token validity time in milliseconds before the token needs to be renewed. Default value 1 day.
	"delegation.token.expiry.time.ms"?: int & >=1

	// Scan interval to remove expired delegation tokens.
	"delegation.token.expiry.check.interval.ms"?: int & >=1

	// The secret used for encoding dynamically configured passwords for this broker.
	"password.encoder.secret"?: string

	// The old secret that was used for encoding dynamically configured passwords.
	"password.encoder.old.secret"?: string

	// The SecretKeyFactory algorithm used for encoding dynamically configured passwords.
	"password.encoder.keyfactory.algorithm"?: string

	// The Cipher algorithm used for encoding dynamically configured passwords.
	"password.encoder.cipher.algorithm"?: string

	// The key length used for encoding dynamically configured passwords.
	"password.encoder.key.length"?: int & >=8

	// The iteration count used for encoding dynamically configured passwords.
	"password.encoder.iterations"?: int & >=1024

	// Maximum time in milliseconds to wait without being able to fetch from the leader before triggering a new election
	"controller.quorum.election.timeout.ms"?: int

	// Maximum time without a successful fetch from the current leader before becoming a candidate and triggering an election for voters; Maximum time without receiving fetch from a majority of the quorum before asking around to see if there's a new epoch for leader
	"controller.quorum.fetch.timeout.ms"?: int

	// Maximum time in milliseconds before starting new elections. This is used in the binary exponential backoff mechanism that helps prevent gridlocked elections
	"controller.quorum.election.backoff.max.ms"?: int

	// The duration in milliseconds that the leader will wait for writes to accumulate before flushing them to disk.
	"controller.quorum.append.linger.ms"?: int

	// The configuration controls the maximum amount of time the client will wait for the response of a request.
	"controller.quorum.request.timeout.ms"?: int

	// The amount of time to wait before attempting to retry a failed request to a given topic partition.
	"controller.quorum.retry.backoff.ms"?: int

	// other parameters
	...
}
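
// Illustrative sketch only, not part of the upstream schema: a concrete set of
// broker settings can be checked against the constraints above by unifying it
// with #KafkaParameter (e.g. via `cue vet`), which rejects values outside the
// declared bounds or enumerations. The hidden field _exampleBrokerConfig and
// every value below are assumptions chosen for the example, not defaults
// shipped by KubeBlocks.
_exampleBrokerConfig: #KafkaParameter & {
	"num.network.threads":              3
	"num.io.threads":                   8
	"log.retention.hours":              168
	"offsets.topic.replication.factor": 3
	"auto.create.topics.enable":        false
	"log.cleanup.policy":               "delete"
}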