github.com/confluentinc/confluent-kafka-go@v1.9.2/kafka/librdkafka_vendor/rdkafka.h

/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file rdkafka.h
 * @brief Apache Kafka C/C++ consumer and producer client library.
 *
 * rdkafka.h contains the public API for librdkafka.
 * The API is documented in this file as comments prefixing the function, type,
 * enum, define, etc.
 *
 * @sa For the C++ interface see rdkafkacpp.h
 *
 * @tableofcontents
 */


/* @cond NO_DOC */
#ifndef _RDKAFKA_H_
#define _RDKAFKA_H_

#include <stdio.h>
#include <inttypes.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#if 0
} /* Restore indent */
#endif
#endif

#ifdef _WIN32
#include <basetsd.h>
#ifndef WIN32_MEAN_AND_LEAN
#define WIN32_MEAN_AND_LEAN
#endif
#include <winsock2.h> /* for sockaddr, .. */
#ifndef _SSIZE_T_DEFINED
#define _SSIZE_T_DEFINED
typedef SSIZE_T ssize_t;
#endif
#define RD_UNUSED
#define RD_INLINE __inline
#define RD_DEPRECATED __declspec(deprecated)
#define RD_FORMAT(...)
#undef RD_EXPORT
#ifdef LIBRDKAFKA_STATICLIB
#define RD_EXPORT
#else
#ifdef LIBRDKAFKA_EXPORTS
#define RD_EXPORT __declspec(dllexport)
#else
#define RD_EXPORT __declspec(dllimport)
#endif
#ifndef LIBRDKAFKA_TYPECHECKS
#define LIBRDKAFKA_TYPECHECKS 0
#endif
#endif

#else
#include <sys/socket.h> /* for sockaddr, .. */

#define RD_UNUSED __attribute__((unused))
#define RD_INLINE inline
#define RD_EXPORT
#define RD_DEPRECATED __attribute__((deprecated))

#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__)))
#else
#define RD_FORMAT(...)
#endif

#ifndef LIBRDKAFKA_TYPECHECKS
#define LIBRDKAFKA_TYPECHECKS 1
#endif
#endif


/**
 * @brief Type-checking macros
 *        Compile-time checking that \p ARG is of type \p TYPE.
 * @returns \p RET
 */
#if LIBRDKAFKA_TYPECHECKS
#define _LRK_TYPECHECK(RET, TYPE, ARG)                                         \
        ({                                                                     \
                if (0) {                                                       \
                        TYPE __t RD_UNUSED = (ARG);                            \
                }                                                              \
                RET;                                                           \
        })

#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2)                           \
        ({                                                                     \
                if (0) {                                                       \
                        TYPE __t RD_UNUSED   = (ARG);                          \
                        TYPE2 __t2 RD_UNUSED = (ARG2);                         \
                }                                                              \
                RET;                                                           \
        })

#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3)              \
        ({                                                                     \
                if (0) {                                                       \
                        TYPE __t RD_UNUSED   = (ARG);                          \
                        TYPE2 __t2 RD_UNUSED = (ARG2);                         \
                        TYPE3 __t3 RD_UNUSED = (ARG3);                         \
                }                                                              \
                RET;                                                           \
        })
#else
#define _LRK_TYPECHECK(RET, TYPE, ARG)                            (RET)
#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2)              (RET)
#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) (RET)
#endif

/* @endcond */


/**
 * @name librdkafka version
 * @{
 *
 *
 */

/**
 * @brief librdkafka version
 *
 * Interpreted as hex \c MM.mm.rr.xx:
 *  - MM = Major
 *  - mm = minor
 *  - rr = revision
 *  - xx = pre-release id (0xff is the final release)
 *
 * E.g.: \c 0x000801ff = 0.8.1
 *
 * @remark This value should only be used during compile time,
 *         for runtime checks of version use rd_kafka_version()
 */
#define RD_KAFKA_VERSION 0x010902ff

/**
 * @brief Returns the librdkafka version as integer.
 *
 * @returns Version integer.
 *
 * @sa See RD_KAFKA_VERSION for how to parse the integer format.
 * @sa Use rd_kafka_version_str() to retrieve the version as a string.
 */
RD_EXPORT
int rd_kafka_version(void);

/**
 * @brief Returns the librdkafka version as string.
 *
 * @returns Version string
 */
RD_EXPORT
const char *rd_kafka_version_str(void);

/**@}*/


/**
 * @name Constants, errors, types
 * @{
 *
 *
 */


/**
 * @enum rd_kafka_type_t
 *
 * @brief rd_kafka_t handle type.
 *
 * @sa rd_kafka_new()
 */
typedef enum rd_kafka_type_t {
        RD_KAFKA_PRODUCER, /**< Producer client */
        RD_KAFKA_CONSUMER  /**< Consumer client */
} rd_kafka_type_t;


/*!
 * Timestamp types
 *
 * @sa rd_kafka_message_timestamp()
 */
typedef enum rd_kafka_timestamp_type_t {
        RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,  /**< Timestamp not available */
        RD_KAFKA_TIMESTAMP_CREATE_TIME,    /**< Message creation time */
        RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */
} rd_kafka_timestamp_type_t;



/**
 * @brief Retrieve supported debug contexts for use with the \c \"debug\"
 *        configuration property. (runtime)
 *
 * @returns Comma-separated list of available debugging contexts.
 */
RD_EXPORT
const char *rd_kafka_get_debug_contexts(void);

/**
 * @brief Supported debug contexts. (compile time)
 *
 * @deprecated This compile time value may be outdated at runtime due to
 *             linking another version of the library.
 *             Use rd_kafka_get_debug_contexts() instead.
 */
#define RD_KAFKA_DEBUG_CONTEXTS                                                \
        "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp,"   \
        "security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor,"  \
        "conf"
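
/*
 * Example (illustrative sketch, not part of the original header):
 * comparing the runtime library version against the compile-time
 * RD_KAFKA_VERSION to detect a mismatched shared library.
 * @code
 * if (rd_kafka_version() != RD_KAFKA_VERSION)
 *         fprintf(stderr, "librdkafka runtime version %s "
 *                 "differs from build-time version 0x%08x\n",
 *                 rd_kafka_version_str(), RD_KAFKA_VERSION);
 * @endcode
 */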

/* @cond NO_DOC */
/* Private types to provide ABI compatibility */
typedef struct rd_kafka_s rd_kafka_t;
typedef struct rd_kafka_topic_s rd_kafka_topic_t;
typedef struct rd_kafka_conf_s rd_kafka_conf_t;
typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
typedef struct rd_kafka_queue_s rd_kafka_queue_t;
typedef struct rd_kafka_op_s rd_kafka_event_t;
typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
typedef struct rd_kafka_consumer_group_metadata_s
    rd_kafka_consumer_group_metadata_t;
typedef struct rd_kafka_error_s rd_kafka_error_t;
typedef struct rd_kafka_headers_s rd_kafka_headers_t;
typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
/* @endcond */


/**
 * @enum rd_kafka_resp_err_t
 * @brief Error codes.
 *
 * The negative error codes delimited by two underscores
 * (\c RD_KAFKA_RESP_ERR__..) denote errors internal to librdkafka and are
 * displayed as \c \"Local: \<error string..\>\", while the error codes
 * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker
 * errors and are displayed as \c \"Broker: \<error string..\>\".
 *
 * @sa Use rd_kafka_err2str() to translate an error code into a human
 *     readable string
 */
typedef enum {
        /* Internal errors to rdkafka: */
        /** Begin internal error codes */
        RD_KAFKA_RESP_ERR__BEGIN = -200,
        /** Received message is incorrect */
        RD_KAFKA_RESP_ERR__BAD_MSG = -199,
        /** Bad/unknown compression */
        RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198,
        /** Broker is going away */
        RD_KAFKA_RESP_ERR__DESTROY = -197,
        /** Generic failure */
        RD_KAFKA_RESP_ERR__FAIL = -196,
        /** Broker transport failure */
        RD_KAFKA_RESP_ERR__TRANSPORT = -195,
        /** Critical system resource */
        RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194,
        /** Failed to resolve broker */
        RD_KAFKA_RESP_ERR__RESOLVE = -193,
        /** Produced message timed out */
        RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192,
        /** Reached the end of the topic+partition queue on
         * the broker. Not really an error.
         * This event is disabled by default,
         * see the `enable.partition.eof` configuration property. */
        RD_KAFKA_RESP_ERR__PARTITION_EOF = -191,
        /** Permanent: Partition does not exist in cluster. */
        RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190,
        /** File or filesystem error */
        RD_KAFKA_RESP_ERR__FS = -189,
        /** Permanent: Topic does not exist in cluster. */
        RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188,
        /** All broker connections are down. */
        RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187,
        /** Invalid argument, or invalid configuration */
        RD_KAFKA_RESP_ERR__INVALID_ARG = -186,
        /** Operation timed out */
        RD_KAFKA_RESP_ERR__TIMED_OUT = -185,
        /** Queue is full */
        RD_KAFKA_RESP_ERR__QUEUE_FULL = -184,
        /** ISR count < required.acks */
        RD_KAFKA_RESP_ERR__ISR_INSUFF = -183,
        /** Broker node update */
        RD_KAFKA_RESP_ERR__NODE_UPDATE = -182,
        /** SSL error */
        RD_KAFKA_RESP_ERR__SSL = -181,
        /** Waiting for coordinator to become available. */
        RD_KAFKA_RESP_ERR__WAIT_COORD = -180,
        /** Unknown client group */
        RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179,
        /** Operation in progress */
        RD_KAFKA_RESP_ERR__IN_PROGRESS = -178,
        /** Previous operation in progress, wait for it to finish. */
        RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177,
        /** This operation would interfere with an existing subscription */
        RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176,
        /** Assigned partitions (rebalance_cb) */
        RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175,
        /** Revoked partitions (rebalance_cb) */
        RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174,
        /** Conflicting use */
        RD_KAFKA_RESP_ERR__CONFLICT = -173,
        /** Wrong state */
        RD_KAFKA_RESP_ERR__STATE = -172,
        /** Unknown protocol */
        RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171,
        /** Not implemented */
        RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170,
        /** Authentication failure */
        RD_KAFKA_RESP_ERR__AUTHENTICATION = -169,
        /** No stored offset */
        RD_KAFKA_RESP_ERR__NO_OFFSET = -168,
        /** Outdated */
        RD_KAFKA_RESP_ERR__OUTDATED = -167,
        /** Timed out in queue */
        RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166,
        /** Feature not supported by broker */
        RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165,
        /** Awaiting cache update */
        RD_KAFKA_RESP_ERR__WAIT_CACHE = -164,
        /** Operation interrupted (e.g., due to yield) */
        RD_KAFKA_RESP_ERR__INTR = -163,
        /** Key serialization error */
        RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162,
        /** Value serialization error */
        RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161,
        /** Key deserialization error */
        RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160,
        /** Value deserialization error */
        RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159,
        /** Partial response */
        RD_KAFKA_RESP_ERR__PARTIAL = -158,
        /** Modification attempted on read-only object */
        RD_KAFKA_RESP_ERR__READ_ONLY = -157,
        /** No such entry / item not found */
        RD_KAFKA_RESP_ERR__NOENT = -156,
        /** Read underflow */
        RD_KAFKA_RESP_ERR__UNDERFLOW = -155,
        /** Invalid type */
        RD_KAFKA_RESP_ERR__INVALID_TYPE = -154,
        /** Retry operation */
        RD_KAFKA_RESP_ERR__RETRY = -153,
        /** Purged in queue */
        RD_KAFKA_RESP_ERR__PURGE_QUEUE = -152,
        /** Purged in flight */
        RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = -151,
        /** Fatal error: see rd_kafka_fatal_error() */
        RD_KAFKA_RESP_ERR__FATAL = -150,
        /** Inconsistent state */
        RD_KAFKA_RESP_ERR__INCONSISTENT = -149,
        /** Gap-less ordering would not be guaranteed if proceeding */
        RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148,
        /** Maximum poll interval exceeded */
        RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147,
        /** Unknown broker */
        RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146,
        /** Functionality not configured */
        RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145,
        /** Instance has been fenced */
        RD_KAFKA_RESP_ERR__FENCED = -144,
        /** Application generated error */
        RD_KAFKA_RESP_ERR__APPLICATION = -143,
        /** Assignment lost */
        RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142,
        /** No operation performed */
        RD_KAFKA_RESP_ERR__NOOP = -141,
        /** No offset to automatically reset to */
        RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140,

        /** End internal error codes */
        RD_KAFKA_RESP_ERR__END = -100,

        /* Kafka broker errors: */
        /** Unknown broker error */
        RD_KAFKA_RESP_ERR_UNKNOWN = -1,
        /** Success */
        RD_KAFKA_RESP_ERR_NO_ERROR = 0,
        /** Offset out of range */
        RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
        /** Invalid message */
        RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
        /** Unknown topic or partition */
        RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
        /** Invalid message size */
        RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
        /** Leader not available */
        RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
        /** Not leader for partition */
        RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
        /** Request timed out */
        RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
        /** Broker not available */
        RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
        /** Replica not available */
        RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
        /** Message size too large */
        RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
        /** StaleControllerEpochCode */
        RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
        /** Offset metadata string too large */
        RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
        /** Broker disconnected before response received */
        RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
        /** Coordinator load in progress */
        RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
/** Group coordinator load in progress */
#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS                               \
        RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS
        /** Coordinator not available */
        RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
/** Group coordinator not available */
#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE                      \
        RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE
        /** Not coordinator */
        RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
/** Not coordinator for group */
#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP                            \
        RD_KAFKA_RESP_ERR_NOT_COORDINATOR
        /** Invalid topic */
        RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
        /** Message batch larger than configured server segment size */
        RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
        /** Not enough in-sync replicas */
        RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
        /** Message(s) written to insufficient number of in-sync replicas */
        RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
        /** Invalid required acks value */
        RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
        /** Specified group generation id is not valid */
        RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
        /** Inconsistent group protocol */
        RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
        /** Invalid group.id */
        RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
        /** Unknown member */
        RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
        /** Invalid session timeout */
        RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
        /** Group rebalance in progress */
        RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
        /** Commit offset data size is not valid */
        RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
        /** Topic authorization failed */
        RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
        /** Group authorization failed */
        RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
        /** Cluster authorization failed */
        RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
        /** Invalid timestamp */
        RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
        /** Unsupported SASL mechanism */
        RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
        /** Illegal SASL state */
        RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
        /** Unsupported version */
        RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
        /** Topic already exists */
        RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
        /** Invalid number of partitions */
        RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
        /** Invalid replication factor */
        RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
        /** Invalid replica assignment */
        RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
        /** Invalid config */
        RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
        /** Not controller for cluster */
        RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
        /** Invalid request */
        RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
        /** Message format on broker does not support request */
        RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
        /** Policy violation */
        RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
        /** Broker received an out of order sequence number */
        RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
        /** Broker received a duplicate sequence number */
        RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
        /** Producer attempted an operation with an old epoch */
        RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
        /** Producer attempted a transactional operation in an invalid state */
        RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
        /** Producer attempted to use a producer id which is not
         * currently assigned to its transactional id */
        RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
        /** Transaction timeout is larger than the maximum
         * value allowed by the broker's max.transaction.timeout.ms */
        RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
        /** Producer attempted to update a transaction while another
         * concurrent operation on the same transaction was ongoing */
        RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
        /** Indicates that the transaction coordinator sending a
         * WriteTxnMarker is no longer the current coordinator for a
         * given producer */
        RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
        /** Transactional Id authorization failed */
        RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
        /** Security features are disabled */
        RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
        /** Operation not attempted */
        RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
        /** Disk error when trying to access log file on the disk */
        RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
        /** The user-specified log directory is not found in the broker config
         */
        RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
        /** SASL Authentication failed */
        RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
        /** Unknown Producer Id */
        RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
        /** Partition reassignment is in progress */
        RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
        /** Delegation Token feature is not enabled */
        RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
        /** Delegation Token is not found on server */
        RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
        /** Specified Principal is not valid Owner/Renewer */
        RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
        /** Delegation Token requests are not allowed on this connection */
        RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
        /** Delegation Token authorization failed */
        RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
        /** Delegation Token is expired */
        RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
        /** Supplied principalType is not supported */
        RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
        /** The group is not empty */
        RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
        /** The group id does not exist */
        RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
        /** The fetch session ID was not found */
        RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
        /** The fetch session epoch is invalid */
        RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
        /** No matching listener */
        RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
        /** Topic deletion is disabled */
        RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
        /** Leader epoch is older than broker epoch */
        RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
        /** Leader epoch is newer than broker epoch */
        RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
        /** Unsupported compression type */
        RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
        /** Broker epoch has changed */
        RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
        /** Leader high watermark is not caught up */
        RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
        /** Group member needs a valid member ID */
        RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
        /** Preferred leader was not available */
        RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
        /** Consumer group has reached maximum size */
        RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
        /** Static consumer fenced by other consumer with same
         * group.instance.id. */
        RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
        /** Eligible partition leaders are not available */
        RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
        /** Leader election not needed for topic partition */
        RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
        /** No partition reassignment is in progress */
        RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
        /** Deleting offsets of a topic while the consumer group is
         * subscribed to it */
        RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
        /** Broker failed to validate record */
        RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
        /** There are unstable offsets that need to be cleared */
        RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
        /** Throttling quota has been exceeded */
        RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
        /** There is a newer producer with the same transactionalId
         * which fences the current one */
        RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
        /** Request illegally referred to resource that does not exist */
        RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
        /** Request illegally referred to the same resource twice */
        RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
        /** Requested credential would not meet criteria for acceptability */
        RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
        /** Indicates that either the sender or recipient of a
         * voter-only request is not one of the expected voters */
        RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
        /** Invalid update version */
        RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
        /** Unable to update finalized features due to server error */
        RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
        /** Request principal deserialization failed during forwarding */
        RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,

        RD_KAFKA_RESP_ERR_END_ALL,
} rd_kafka_resp_err_t;
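
/*
 * Example (illustrative sketch, not part of the original header):
 * translating an error code into its enum name and a human readable
 * description with rd_kafka_err2name() and rd_kafka_err2str(), both
 * declared below.
 * @code
 * rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
 * fprintf(stderr, "%s: %s\n", rd_kafka_err2name(err), rd_kafka_err2str(err));
 * @endcode
 */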


/**
 * @brief Error code value, name and description.
 *        Typically for use with language bindings to automatically expose
 *        the full set of librdkafka error codes.
 */
struct rd_kafka_err_desc {
        rd_kafka_resp_err_t code; /**< Error code */
        const char *name; /**< Error name, same as code enum sans prefix */
        const char *desc; /**< Human readable error description. */
};


/**
 * @brief Returns the full list of error codes.
 */
RD_EXPORT
void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs,
                            size_t *cntp);



/**
 * @brief Returns a human readable representation of a kafka error.
 *
 * @param err Error code to translate
 */
RD_EXPORT
const char *rd_kafka_err2str(rd_kafka_resp_err_t err);



/**
 * @brief Returns the error code name (enum name).
 *
 * @param err Error code to translate
 */
RD_EXPORT
const char *rd_kafka_err2name(rd_kafka_resp_err_t err);


/**
 * @brief Returns the last error code generated by a legacy API call
 *        in the current thread.
 *
 * The legacy APIs are the ones using errno to propagate error value, namely:
 *  - rd_kafka_topic_new()
 *  - rd_kafka_consume_start()
 *  - rd_kafka_consume_stop()
 *  - rd_kafka_consume()
 *  - rd_kafka_consume_batch()
 *  - rd_kafka_consume_callback()
 *  - rd_kafka_consume_queue()
 *  - rd_kafka_produce()
 *
 * The main use for this function is to avoid converting system \p errno
 * values to rd_kafka_resp_err_t codes for legacy APIs.
 *
 * @remark The last error is stored per-thread, if multiple rd_kafka_t handles
 *         are used in the same application thread the developer needs to
 *         make sure rd_kafka_last_error() is called immediately after
 *         a failed API call.
 *
 * @remark errno propagation from librdkafka is not safe on Windows
 *         and should not be used, use rd_kafka_last_error() instead.
 */
RD_EXPORT
rd_kafka_resp_err_t rd_kafka_last_error(void);


/**
 * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t
 *        error code upon failure from the following functions:
 *  - rd_kafka_topic_new()
 *  - rd_kafka_consume_start()
 *  - rd_kafka_consume_stop()
 *  - rd_kafka_consume()
 *  - rd_kafka_consume_batch()
 *  - rd_kafka_consume_callback()
 *  - rd_kafka_consume_queue()
 *  - rd_kafka_produce()
 *
 * @param errnox System errno value to convert
 *
 * @returns Appropriate error code for \p errnox
 *
 * @remark A better alternative is to call rd_kafka_last_error() immediately
 *         after any of the above functions return -1 or NULL.
 *
 * @deprecated Use rd_kafka_last_error() to retrieve the last error code
 *             set by the legacy librdkafka APIs.
 *
 * @sa rd_kafka_last_error()
 */
RD_EXPORT RD_DEPRECATED rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);


/**
 * @brief Returns the thread-local system errno
 *
 * On most platforms this is the same as \p errno but in case of different
 * runtimes between library and application (e.g., Windows static DLLs)
 * this provides a means for exposing the errno librdkafka uses.
 *
 * @remark The value is local to the current calling thread.
 *
 * @deprecated Use rd_kafka_last_error() to retrieve the last error code
 *             set by the legacy librdkafka APIs.
 */
RD_EXPORT RD_DEPRECATED int rd_kafka_errno(void);
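
/*
 * Example (illustrative sketch, not part of the original header):
 * retrieving the error code after a failed legacy API call. Assumes
 * \c rkt, \c partition, \c payload, \c len, \c key and \c key_len are set
 * up by the application; RD_KAFKA_MSG_F_COPY is defined later in this
 * header.
 * @code
 * if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
 *                      payload, len, key, key_len, NULL) == -1)
 *         fprintf(stderr, "produce failed: %s\n",
 *                 rd_kafka_err2str(rd_kafka_last_error()));
 * @endcode
 */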



/**
 * @brief Returns the first fatal error set on this client instance,
 *        or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred.
 *
 * This function is to be used with the Idempotent Producer and \c error_cb
 * to detect fatal errors.
 *
 * Generally all errors raised by \c error_cb are to be considered
 * informational and temporary, the client will try to recover from all
 * errors in a graceful fashion (by retrying, etc).
 *
 * However, some errors should logically be considered fatal to retain
 * consistency; in particular a set of errors that may occur when using the
 * Idempotent Producer, where the in-order or exactly-once producer
 * guarantees can't be satisfied.
 *
 * @param rk Client instance.
 * @param errstr A human readable error string (nul-terminated) is written to
 *               this location that must be of at least \p errstr_size bytes.
 *               The \p errstr is only written to if there is a fatal error.
 * @param errstr_size Writable size in \p errstr.
 *
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else
 *          any other error code.
 */
RD_EXPORT
rd_kafka_resp_err_t
rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);


/**
 * @brief Trigger a fatal error for testing purposes.
 *
 * Since there is no practical way to trigger real fatal errors in the
 * idempotent producer, this method allows an application to trigger
 * fabricated fatal errors in tests to check its error handling code.
 *
 * @param rk Client instance.
 * @param err The underlying error code.
 * @param reason A human readable error reason.
 *               Will be prefixed with "test_fatal_error: " to differentiate
 *               from real fatal errors.
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or
 *          RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error
 *          has already been triggered.
 */
RD_EXPORT rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk,
                                                        rd_kafka_resp_err_t err,
                                                        const char *reason);


/**
 * @returns the error code for \p error or RD_KAFKA_RESP_ERR_NO_ERROR if
 *          \p error is NULL.
 */
RD_EXPORT
rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);

/**
 * @returns the error code name for \p error, e.g., "ERR_UNKNOWN_MEMBER_ID",
 *          or an empty string if \p error is NULL.
 *
 * @remark The lifetime of the returned pointer is the same as the error
 *         object.
 *
 * @sa rd_kafka_err2name()
 */
RD_EXPORT
const char *rd_kafka_error_name(const rd_kafka_error_t *error);

/**
 * @returns a human readable error string for \p error,
 *          or an empty string if \p error is NULL.
 *
 * @remark The lifetime of the returned pointer is the same as the error
 *         object.
 */
RD_EXPORT
const char *rd_kafka_error_string(const rd_kafka_error_t *error);


/**
 * @returns 1 if the error is a fatal error, indicating that the client
 *          instance is no longer usable, else 0 (also if \p error is NULL).
 */
RD_EXPORT
int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
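
/*
 * Example (illustrative sketch, not part of the original header):
 * generic handling of an rd_kafka_error_t returned by an API call.
 * \c some_api_call is a hypothetical placeholder for any librdkafka
 * function returning an error object; rd_kafka_error_destroy() is
 * declared below.
 * @code
 * rd_kafka_error_t *error = some_api_call(rk); // hypothetical call
 * if (error) {
 *         fprintf(stderr, "Failed: %s: %s\n",
 *                 rd_kafka_error_name(error),
 *                 rd_kafka_error_string(error));
 *         if (rd_kafka_error_is_fatal(error))
 *                 fprintf(stderr, "Client instance is no longer usable\n");
 *         rd_kafka_error_destroy(error);
 * }
 * @endcode
 */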


/**
 * @returns 1 if the operation may be retried,
 *          else 0 (also if \p error is NULL).
 */
RD_EXPORT
int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);


/**
 * @returns 1 if the error is an abortable transaction error in which case
 *          the application must call rd_kafka_abort_transaction() and
 *          start a new transaction with rd_kafka_begin_transaction() if it
 *          wishes to proceed with transactions.
 *          Else returns 0 (also if \p error is NULL).
 *
 * @remark The return value of this method is only valid for errors returned
 *         by the transactional API.
 */
RD_EXPORT
int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);

/**
 * @brief Free and destroy an error object.
 *
 * @remark As a convenience it is permitted to pass a NULL \p error.
 */
RD_EXPORT
void rd_kafka_error_destroy(rd_kafka_error_t *error);


/**
 * @brief Create a new error object with error \p code and optional
 *        human readable error string in \p fmt.
 *
 * This method is mainly to be used for mocking errors in application test
 * code.
 *
 * The returned object must be destroyed with rd_kafka_error_destroy().
 */
RD_EXPORT
rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code,
                                     const char *fmt,
                                     ...) RD_FORMAT(printf, 2, 3);


/**
 * @brief Topic+Partition placeholder
 *
 * Generic placeholder for a Topic+Partition and its related information
 * used for multiple purposes:
 *   - consumer offset (see rd_kafka_commit(), et.al.)
 *   - group rebalancing callback (rd_kafka_conf_set_rebalance_cb())
 *   - offset commit result callback (rd_kafka_conf_set_offset_commit_cb())
 */

/**
 * @brief Generic placeholder for a specific Topic+Partition.
 *
 * @sa rd_kafka_topic_partition_list_new()
 */
typedef struct rd_kafka_topic_partition_s {
        char *topic;             /**< Topic name */
        int32_t partition;       /**< Partition */
        int64_t offset;          /**< Offset */
        void *metadata;          /**< Metadata */
        size_t metadata_size;    /**< Metadata size */
        void *opaque;            /**< Opaque value for application use */
        rd_kafka_resp_err_t err; /**< Error code, depending on use. */
        void *_private;          /**< INTERNAL USE ONLY,
                                  *   INITIALIZE TO ZERO, DO NOT TOUCH */
} rd_kafka_topic_partition_t;


/**
 * @brief Destroy a rd_kafka_topic_partition_t.
 * @remark This must not be called for elements in a topic partition list.
 */
RD_EXPORT
void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);


/**
 * @brief A growable list of Topic+Partitions.
 *
 */
typedef struct rd_kafka_topic_partition_list_s {
        int cnt;                           /**< Current number of elements */
        int size;                          /**< Current allocated size */
        rd_kafka_topic_partition_t *elems; /**< Element array[] */
} rd_kafka_topic_partition_list_t;
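
/*
 * Example (illustrative sketch, not part of the original header):
 * iterating over the elements of a topic+partition list. Assumes
 * \c parts is a populated rd_kafka_topic_partition_list_t.
 * @code
 * int i;
 * for (i = 0; i < parts->cnt; i++)
 *         printf("%s [%" PRId32 "] offset %" PRId64 "\n",
 *                parts->elems[i].topic,
 *                parts->elems[i].partition,
 *                parts->elems[i].offset);
 * @endcode
 */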

/**
 * @brief Create a new list/vector Topic+Partition container.
 *
 * @param size Initial allocated size used when the expected number of
 *             elements is known or can be estimated.
 *             Avoids reallocation and possibly relocation of the
 *             elems array.
 *
 * @returns A newly allocated Topic+Partition list.
 *
 * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources
 *         in use by a list and the list itself.
 * @sa rd_kafka_topic_partition_list_add()
 */
RD_EXPORT
rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);


/**
 * @brief Free all resources used by the list and the list itself.
 */
RD_EXPORT
void rd_kafka_topic_partition_list_destroy(
    rd_kafka_topic_partition_list_t *rkparlist);

/**
 * @brief Add topic+partition to list
 *
 * @param rktparlist List to extend
 * @param topic      Topic name (copied)
 * @param partition  Partition id
 *
 * @returns The object which can be used to fill in additional fields.
 */
RD_EXPORT
rd_kafka_topic_partition_t *
rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist,
                                  const char *topic,
                                  int32_t partition);


/**
 * @brief Add range of partitions from \p start to \p stop inclusive.
 *
 * @param rktparlist List to extend
 * @param topic      Topic name (copied)
 * @param start      Start partition of range
 * @param stop       Last partition of range (inclusive)
 */
RD_EXPORT
void rd_kafka_topic_partition_list_add_range(
    rd_kafka_topic_partition_list_t *rktparlist,
    const char *topic,
    int32_t start,
    int32_t stop);



/**
 * @brief Delete partition from list.
 *
 * @param rktparlist List to modify
 * @param topic      Topic name to match
 * @param partition  Partition to match
 *
 * @returns 1 if partition was found (and removed), else 0.
 *
 * @remark Any held indices to elems[] are unusable after this call returns 1.
 */
RD_EXPORT
int rd_kafka_topic_partition_list_del(
    rd_kafka_topic_partition_list_t *rktparlist,
    const char *topic,
    int32_t partition);


/**
 * @brief Delete partition from list by elems[] index.
 *
 * @returns 1 if partition was found (and removed), else 0.
 *
 * @sa rd_kafka_topic_partition_list_del()
 */
RD_EXPORT
int rd_kafka_topic_partition_list_del_by_idx(
    rd_kafka_topic_partition_list_t *rktparlist,
    int idx);


/**
 * @brief Make a copy of an existing list.
 *
 * @param src The existing list to copy.
 *
 * @returns A new list fully populated to be identical to \p src
 */
RD_EXPORT
rd_kafka_topic_partition_list_t *
rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);



/**
 * @brief Set offset to \p offset for \p topic and \p partition
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
 *          RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found
 *          in the list.
 */
RD_EXPORT
rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(
    rd_kafka_topic_partition_list_t *rktparlist,
    const char *topic,
    int32_t partition,
    int64_t offset);



/**
 * @brief Find element by \p topic and \p partition.
 *
 * @returns a pointer to the first matching element, or NULL if not found.
 */
RD_EXPORT
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(
    const rd_kafka_topic_partition_list_t *rktparlist,
    const char *topic,
    int32_t partition);


/**
 * @brief Sort list using comparator \p cmp.
 *
 * If \p cmp is NULL the default comparator will be used that
 * sorts by ascending topic name and partition.
 *
 * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp.
 *
 */
RD_EXPORT void rd_kafka_topic_partition_list_sort(
    rd_kafka_topic_partition_list_t *rktparlist,
    int (*cmp)(const void *a, const void *b, void *cmp_opaque),
    void *cmp_opaque);
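
/*
 * Example (illustrative sketch, not part of the original header):
 * building a partition list with the functions above and setting the
 * start offset of one partition. "mytopic" is a stand-in topic name.
 * @code
 * rd_kafka_topic_partition_list_t *parts;
 *
 * parts = rd_kafka_topic_partition_list_new(2);
 * rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
 * rd_kafka_topic_partition_list_add(parts, "mytopic", 1)->offset = 1234;
 * // ... pass parts to an assign/commit/seek API ...
 * rd_kafka_topic_partition_list_destroy(parts);
 * @endcode
 */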

/**@}*/



/**
 * @name Var-arg tag types
 * @{
 *
 */

/**
 * @enum rd_kafka_vtype_t
 *
 * @brief Var-arg tag types
 *
 * @sa rd_kafka_producev()
 */
typedef enum rd_kafka_vtype_t {
        RD_KAFKA_VTYPE_END,       /**< va-arg sentinel */
        RD_KAFKA_VTYPE_TOPIC,     /**< (const char *) Topic name */
        RD_KAFKA_VTYPE_RKT,       /**< (rd_kafka_topic_t *) Topic handle */
        RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */
        RD_KAFKA_VTYPE_VALUE,     /**< (void *, size_t) Message value
                                   *   (payload) */
        RD_KAFKA_VTYPE_KEY,       /**< (void *, size_t) Message key */
        RD_KAFKA_VTYPE_OPAQUE,    /**< (void *) Per-message application opaque
                                   *   value. This is the same as
                                   *   the _private field in
                                   *   rd_kafka_message_t, also known
                                   *   as the msg_opaque. */
        RD_KAFKA_VTYPE_MSGFLAGS,  /**< (int) RD_KAFKA_MSG_F_.. flags */
        RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */
        RD_KAFKA_VTYPE_HEADER,    /**< (const char *, const void *, ssize_t)
                                   *   Message Header */
        RD_KAFKA_VTYPE_HEADERS,   /**< (rd_kafka_headers_t *) Headers list */
} rd_kafka_vtype_t;


/**
 * @brief VTYPE + argument container for use with rd_kafka_produce_va()
 *
 * See RD_KAFKA_V_..() macros below for which union field corresponds
 * to which RD_KAFKA_VTYPE_...
 */
typedef struct rd_kafka_vu_s {
        rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */
        /** Value union, see RD_KAFKA_V_.. macros for which field to use. */
        union {
                const char *cstr;
                rd_kafka_topic_t *rkt;
                int i;
                int32_t i32;
                int64_t i64;
                struct {
                        void *ptr;
                        size_t size;
                } mem;
                struct {
                        const char *name;
                        const void *val;
                        ssize_t size;
                } header;
                rd_kafka_headers_t *headers;
                void *ptr;
                char _pad[64]; /**< Padding size for future-proofness */
        } u;
} rd_kafka_vu_t;

/**
 * @brief Convenience macros for rd_kafka_vtype_t that take the
 *        correct arguments for each vtype.
 */

/*!
 * va-arg end sentinel used to terminate the variable argument list
 */
#define RD_KAFKA_V_END RD_KAFKA_VTYPE_END

/*!
 * Topic name (const char *)
 *
 * rd_kafka_vu_t field: u.cstr
 */
#define RD_KAFKA_V_TOPIC(topic)                                                \
        _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic),             \
            (const char *)topic
/*!
 * Topic object (rd_kafka_topic_t *)
 *
 * rd_kafka_vu_t field: u.rkt
 */
#define RD_KAFKA_V_RKT(rkt)                                                    \
        _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt),           \
            (rd_kafka_topic_t *)rkt
/*!
 * Partition (int32_t)
 *
 * rd_kafka_vu_t field: u.i32
 */
#define RD_KAFKA_V_PARTITION(partition)                                        \
        _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition),          \
            (int32_t)partition
/*!
 * Message value/payload pointer and length (void *, size_t)
 *
 * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size
 */
#define RD_KAFKA_V_VALUE(VALUE, LEN)                                           \
        _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN),     \
            (void *)VALUE, (size_t)LEN
/*!
 * Message key pointer and length (const void *, size_t)
 *
 * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size
 */
#define RD_KAFKA_V_KEY(KEY, LEN)                                               \
        _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN),   \
            (void *)KEY, (size_t)LEN
/*!
 * Message opaque pointer (void *)
 * Same as \c msg_opaque, \c produce(.., msg_opaque),
 * and \c rkmessage->_private .
 *
 * rd_kafka_vu_t field: u.ptr
 */
#define RD_KAFKA_V_OPAQUE(msg_opaque)                                          \
        _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque),             \
            (void *)msg_opaque
/*!
 * Message flags (int)
 * @sa RD_KAFKA_MSG_F_COPY, et.al.
 *
 * rd_kafka_vu_t field: u.i
 */
#define RD_KAFKA_V_MSGFLAGS(msgflags)                                          \
        _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), (int)msgflags
/*!
 * Timestamp in milliseconds since epoch UTC (int64_t).
 * A value of 0 will use the current wall-clock time.
 *
 * rd_kafka_vu_t field: u.i64
 */
#define RD_KAFKA_V_TIMESTAMP(timestamp)                                        \
        _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp),          \
            (int64_t)timestamp
/*!
 * Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN).
 * @sa rd_kafka_header_add()
 * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed
 *         in the same call to producev().
 *
 * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size
 */
#define RD_KAFKA_V_HEADER(NAME, VALUE, LEN)                                    \
        _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME,             \
                        const void *, VALUE, ssize_t, LEN),                    \
            (const char *)NAME, (const void *)VALUE, (ssize_t)LEN

/*!
 * Message Headers list (rd_kafka_headers_t *).
 * The message object will assume ownership of the headers (unless producev()
 * fails).
 * Any existing headers will be replaced.
 * @sa rd_kafka_message_set_headers()
 * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed
 *         in the same call to producev().
 *
 * rd_kafka_vu_t fields: u.headers
 */
#define RD_KAFKA_V_HEADERS(HDRS)                                               \
        _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS),    \
            (rd_kafka_headers_t *)HDRS


/**@}*/
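
/*
 * Example (illustrative sketch, not part of the original header):
 * producing a message with rd_kafka_producev() (declared later in this
 * header) using the RD_KAFKA_V_ macros above. Assumes \c rk, \c payload,
 * \c len, \c key and \c key_len are application-side names;
 * RD_KAFKA_PARTITION_UA and RD_KAFKA_MSG_F_COPY are defined later in
 * this header.
 * @code
 * rd_kafka_resp_err_t err;
 * err = rd_kafka_producev(rk,
 *                         RD_KAFKA_V_TOPIC("mytopic"),
 *                         RD_KAFKA_V_PARTITION(RD_KAFKA_PARTITION_UA),
 *                         RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
 *                         RD_KAFKA_V_VALUE(payload, len),
 *                         RD_KAFKA_V_KEY(key, key_len),
 *                         RD_KAFKA_V_END);
 * if (err)
 *         fprintf(stderr, "producev failed: %s\n", rd_kafka_err2str(err));
 * @endcode
 */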


/**
 * @name Message headers
 * @{
 *
 * @brief Message headers consist of a list of (string key, binary value)
 *        pairs.
 *        Duplicate keys are supported and the order in which keys were
 *        added is retained.
 *
 *        Header values are considered binary and may have three types of
 *        value:
 *          - proper value with size > 0 and a valid pointer
 *          - empty value with size = 0 and any non-NULL pointer
 *          - null value with size = 0 and a NULL pointer
 *
 *        Headers require Apache Kafka broker version v0.11.0.0 or later.
 *
 *        Header operations are O(n).
 */


/**
 * @brief Create a new headers list.
 *
 * @param initial_count Preallocate space for this number of headers.
 *                      Any number of headers may be added, updated and
 *                      removed regardless of the initial count.
 */
RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);

/**
 * @brief Destroy the headers list. The object and any returned value pointers
 *        are not usable after this call.
 */
RD_EXPORT void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);

/**
 * @brief Make a copy of headers list \p src.
 */
RD_EXPORT rd_kafka_headers_t *
rd_kafka_headers_copy(const rd_kafka_headers_t *src);

/**
 * @brief Add header with name \p name and value \p val (copied) of size
 *        \p size (not including null-terminator).
 *
 * @param hdrs       Headers list.
 * @param name       Header name.
 * @param name_size  Header name size (not including the null-terminator).
 *                   If -1 the \p name length is automatically acquired using
 *                   strlen().
 * @param value      Pointer to header value, or NULL (set size to 0 or -1).
 * @param value_size Size of header value. If -1 the \p value is assumed to be
 *                   a null-terminated string and the length is automatically
 *                   acquired using strlen().
 *
 * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only,
 *          else RD_KAFKA_RESP_ERR_NO_ERROR.
 */
RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs,
                                                  const char *name,
                                                  ssize_t name_size,
                                                  const void *value,
                                                  ssize_t value_size);

/**
 * @brief Remove all headers for the given key (if any).
 *
 * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only,
 *          RD_KAFKA_RESP_ERR__NOENT if no matching headers were found,
 *          else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed.
 */
RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs,
                                                     const char *name);


/**
 * @brief Find last header in list \p hdrs matching \p name.
 *
 * @param hdrs   Headers list.
 * @param name   Header to find (last match).
 * @param valuep (out) Set to a (null-terminated) const pointer to the value
 *               (may be NULL).
 * @param sizep  (out) Set to the value's size (not including null-terminator).
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else
 *          RD_KAFKA_RESP_ERR__NOENT.
 *
 * @remark The returned pointer in \p valuep includes a trailing
 *         null-terminator that is not accounted for in \p sizep.
 * @remark The returned pointer is only valid as long as the headers list and
 *         the header item is valid.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs,
                         const char *name,
                         const void **valuep,
                         size_t *sizep);

/**
 * @brief Iterator for headers matching \p name.
 *
 *        Same semantics as rd_kafka_header_get_last()
 *
 * @param hdrs   Headers to iterate.
 * @param idx    Iterator index, start at 0 and increment by one for each call
 *               as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned.
 * @param name   Header name to match.
 * @param valuep (out) Set to a (null-terminated) const pointer to the value
 *               (may be NULL).
 * @param sizep  (out) Set to the value's size (not including null-terminator).
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_header_get(const rd_kafka_headers_t *hdrs,
                    size_t idx,
                    const char *name,
                    const void **valuep,
                    size_t *sizep);
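
/*
 * Example (illustrative sketch, not part of the original header):
 * adding a header and reading it back with the functions above.
 * @code
 * rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
 * const void *value;
 * size_t size;
 *
 * rd_kafka_header_add(hdrs, "client", -1, "example", -1);
 * if (!rd_kafka_header_get_last(hdrs, "client", &value, &size))
 *         printf("client = %.*s\n", (int)size, (const char *)value);
 * rd_kafka_headers_destroy(hdrs);
 * @endcode
 */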

/**
 * @brief Iterator for all headers.
 *
 *        Same semantics as rd_kafka_header_get()
 *
 * @sa rd_kafka_header_get()
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs,
                        size_t idx,
                        const char **namep,
                        const void **valuep,
                        size_t *sizep);



/**@}*/



/**
 * @name Kafka messages
 * @{
 *
 */



// FIXME: This doesn't show up in docs for some reason
// "Compound rd_kafka_message_t is not documented."

/**
 * @brief A Kafka message as returned by the \c rd_kafka_consume*() family
 *        of functions as well as provided to the Producer \c dr_msg_cb().
 *
 * For the consumer this object has two purposes:
 *  - provide the application with a consumed message. (\c err == 0)
 *  - report per-topic+partition consumer errors (\c err != 0)
 *
 * The application must check \c err to decide what action to take.
 *
 * When the application is finished with a message it must call
 * rd_kafka_message_destroy() unless otherwise noted.
 */
typedef struct rd_kafka_message_s {
        rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */
        rd_kafka_topic_t *rkt;   /**< Topic */
        int32_t partition;       /**< Partition */
        void *payload;           /**< Producer: original message payload.
                                  * Consumer: Depends on the value of \c err :
                                  * - \c err==0: Message payload.
                                  * - \c err!=0: Error string */
        size_t len;              /**< Depends on the value of \c err :
                                  * - \c err==0: Message payload length
                                  * - \c err!=0: Error string length */
        void *key;               /**< Depends on the value of \c err :
                                  * - \c err==0: Optional message key */
        size_t key_len;          /**< Depends on the value of \c err :
                                  * - \c err==0: Optional message key length */
        int64_t offset;          /**< Consumer:
                                  * - Message offset (or offset for error
                                  *   if \c err!=0, if applicable).
                                  * Producer, dr_msg_cb:
                                  *   Message offset assigned by broker.
                                  *   May be RD_KAFKA_OFFSET_INVALID
                                  *   for retried messages when
                                  *   idempotence is enabled. */
        void *_private;          /**< Consumer:
                                  * - rdkafka private pointer: DO NOT MODIFY
                                  * Producer:
                                  * - dr_msg_cb:
                                  *   msg_opaque from produce() call or
                                  *   RD_KAFKA_V_OPAQUE from producev(). */
} rd_kafka_message_t;


/**
 * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka.
 */
RD_EXPORT
void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);



/**
 * @brief Returns the error string for an errored rd_kafka_message_t or NULL
 *        if there was no error.
 *
 * @remark This function MUST NOT be used with the producer.
 */
RD_EXPORT
const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);


/**
 * @brief Returns the message timestamp for a consumed message.
 *
 * The timestamp is the number of milliseconds since the epoch (UTC).
 *
 * \p tstype (if not NULL) is updated to indicate the type of timestamp.
 *
 * @returns message timestamp, or -1 if not available.
 *
 * @remark Message timestamps require broker version 0.10.0 or later.
 */
RD_EXPORT
int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage,
                                   rd_kafka_timestamp_type_t *tstype);
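
/*
 * Example (illustrative sketch, not part of the original header):
 * handling a consumed message, checking \c err before touching the
 * payload. rd_kafka_consumer_poll() is declared later in this header;
 * \c rk is assumed to be a consumer instance.
 * @code
 * rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 1000);
 * if (rkm) {
 *         if (rkm->err)
 *                 fprintf(stderr, "Consume error: %s\n",
 *                         rd_kafka_message_errstr(rkm));
 *         else
 *                 printf("Message at offset %" PRId64 ": %.*s\n",
 *                        rkm->offset, (int)rkm->len,
 *                        (const char *)rkm->payload);
 *         rd_kafka_message_destroy(rkm);
 * }
 * @endcode
 */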

/**
 * @brief Returns the latency for a produced message measured from
 *        the produce() call.
 *
 * @returns the latency in microseconds, or -1 if not available.
 */
RD_EXPORT
int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);


/**
 * @brief Returns the broker id of the broker the message was produced to
 *        or fetched from.
 *
 * @returns a broker id if known, else -1.
 */
RD_EXPORT
int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);


/**
 * @brief Get the message header list.
 *
 * The returned pointer in \p *hdrsp is associated with the \p rkmessage and
 * must not be used after destruction of the message object or the header
 * list is replaced with rd_kafka_message_set_headers().
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned,
 *          RD_KAFKA_RESP_ERR__NOENT if the message has no headers,
 *          or another error code if the headers could not be parsed.
 *
 * @remark Headers require broker version 0.11.0.0 or later.
 *
 * @remark As an optimization the raw protocol headers are parsed on
 *         the first call to this function.
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_message_headers(const rd_kafka_message_t *rkmessage,
                         rd_kafka_headers_t **hdrsp);

/**
 * @brief Get the message header list and detach the list from the message
 *        making the application the owner of the headers.
 *        The application must eventually destroy the headers using
 *        rd_kafka_headers_destroy().
 *        The message's headers will be set to NULL.
 *
 *        Otherwise same semantics as rd_kafka_message_headers()
 *
 * @sa rd_kafka_message_headers
 */
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage,
                                rd_kafka_headers_t **hdrsp);


/**
 * @brief Replace the message's current headers with a new list.
 *
 * @param rkmessage The message to set headers on.
 * @param hdrs      New header list. The message object assumes ownership of
 *                  the list, the list will be destroyed automatically with
 *                  the message object.
 *                  The new headers list may be updated until the message
 *                  object is passed or returned to librdkafka.
 *
 * @remark The existing headers object, if any, will be destroyed.
 */
RD_EXPORT
void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage,
                                  rd_kafka_headers_t *hdrs);


/**
 * @brief Returns the number of header key/value pairs
 *
 * @param hdrs Headers to count
 */
RD_EXPORT size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);


/**
 * @enum rd_kafka_msg_status_t
 * @brief Message persistence status can be used by the application to
 *        find out if a produced message was persisted in the topic log.
 */
typedef enum {
        /** Message was never transmitted to the broker, or failed with
         *  an error indicating it was not written to the log.
         *  Application retry risks ordering, but not duplication. */
        RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,

        /** Message was transmitted to broker, but no acknowledgement was
         *  received.
         *  Application retry risks ordering and duplication. */
        RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,

        /** Message was written to the log and acknowledged by the broker.
         *  No reason for application to retry.
         *  Note: this value should only be trusted with \c acks=all. */
        RD_KAFKA_MSG_STATUS_PERSISTED = 2
} rd_kafka_msg_status_t;
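
/*
 * Example (illustrative sketch, not part of the original header):
 * inspecting the persistence status from a delivery report callback,
 * using rd_kafka_message_status() declared below, to decide whether an
 * application-level retry risks duplication.
 * @code
 * rd_kafka_msg_status_t status = rd_kafka_message_status(rkmessage);
 * if (status == RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED)
 *         fprintf(stderr, "Retrying this message may cause duplication\n");
 * @endcode
 */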

/**
 * @brief Returns the message's persistence status in the topic log.
 *
 * @remark The message status is not available in on_acknowledgement
 *         interceptors.
 */
RD_EXPORT rd_kafka_msg_status_t
rd_kafka_message_status(const rd_kafka_message_t *rkmessage);

/**@}*/


/**
 * @name Configuration interface
 * @{
 *
 * @brief Main/global configuration property interface
 *
 */

/**
 * @enum rd_kafka_conf_res_t
 * @brief Configuration result type
 */
typedef enum {
        RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */
        RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or
                                     *   property or value not supported in
                                     *   this build. */
        RD_KAFKA_CONF_OK = 0        /**< Configuration okay */
} rd_kafka_conf_res_t;


/**
 * @brief Create configuration object.
 *
 * When providing your own configuration to the \c rd_kafka_*_new_*() calls
 * the rd_kafka_conf_t object needs to be created with this function
 * which will set up the defaults.
 * I.e.:
 * @code
 *   rd_kafka_conf_t *myconf;
 *   rd_kafka_conf_res_t res;
 *
 *   myconf = rd_kafka_conf_new();
 *   res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
 *                           errstr, sizeof(errstr));
 *   if (res != RD_KAFKA_CONF_OK)
 *      die("%s\n", errstr);
 *
 *   rk = rd_kafka_new(..., myconf);
 * @endcode
 *
 * Please see CONFIGURATION.md for the default settings or use
 * rd_kafka_conf_properties_show() to provide the information at runtime.
 *
 * The properties are identical to the Apache Kafka configuration properties
 * whenever possible.
 *
 * @remark A successful call to rd_kafka_new() will assume ownership of
 *         the conf object and rd_kafka_conf_destroy() must not be called.
 *
 * @returns A new rd_kafka_conf_t object with defaults set.
 *
 * @sa rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy()
 */
RD_EXPORT
rd_kafka_conf_t *rd_kafka_conf_new(void);


/**
 * @brief Destroys a conf object.
 */
RD_EXPORT
void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);


/**
 * @brief Creates a copy/duplicate of configuration object \p conf
 *
 * @remark Interceptors are NOT copied to the new configuration object.
 * @sa rd_kafka_interceptor_f_on_conf_dup
 */
RD_EXPORT
rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);


/**
 * @brief Same as rd_kafka_conf_dup() but with an array of property name
 *        prefixes to filter out (ignore) when copying.
 */
RD_EXPORT
rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf,
                                          size_t filter_cnt,
                                          const char **filter);



/**
 * @returns the configuration object used by an rd_kafka_t instance.
 *          For use with rd_kafka_conf_get(), et.al., to extract configuration
 *          properties from a running client.
 *
 * @remark the returned object is read-only and its lifetime is the same
 *         as the rd_kafka_t object.
 */
RD_EXPORT
const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);


/**
 * @brief Sets a configuration property.
 *
 * \p conf must have been previously created with rd_kafka_conf_new().
1693 *
1694 * Fallthrough:
1695 * Topic-level configuration properties may be set using this interface
1696 * in which case they are applied on the \c default_topic_conf.
1697 * If no \c default_topic_conf has been set one will be created.
1698 * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will
1699 * replace the current default topic configuration.
1700 *
1701 * @returns \c rd_kafka_conf_res_t to indicate success or failure.
1702 * In case of failure \p errstr is updated to contain a human-readable
1703 * error string.
1704 *
1705 * @remark Setting properties or values that were disabled at build time due to
1706 * missing dependencies will return RD_KAFKA_CONF_INVALID.
1707 */
1708 RD_EXPORT
1709 rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf,
1710 const char *name,
1711 const char *value,
1712 char *errstr,
1713 size_t errstr_size);
1714
1715
1716 /**
1717 * @brief Enable event sourcing.
1718 * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable
1719 * for consumption by `rd_kafka_queue_poll()`.
1720 */
1721 RD_EXPORT
1722 void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
1723
1724
1725 /**
1726 * @brief Generic event callback to be used with the event API to trigger
1727 * callbacks for \c rd_kafka_event_t objects from a background
1728 * thread serving the background queue.
1729 *
1730 * How to use:
1731 * 1. First set the event callback on the configuration object with this
1732 * function, followed by creating an rd_kafka_t instance
1733 * with rd_kafka_new().
1734 * 2. Get the instance's background queue with rd_kafka_queue_get_background()
1735 * and pass it as the reply/response queue to an API that takes an
1736 * event queue, such as rd_kafka_CreateTopics().
1737 * 3. As the response event is ready and enqueued on the background queue the
1738 * event callback will be triggered from the background thread.
1739 * 4. Prior to destroying the client instance, lose your reference to the
1740 * background queue by calling rd_kafka_queue_destroy().
1741 *
1742 * The application must destroy the \c rkev passed to \p event_cb using
1743 * rd_kafka_event_destroy().
1744 *
1745 * The \p event_cb \c opaque argument is the opaque set with
1746 * rd_kafka_conf_set_opaque().
1747 *
1748 * @remark This callback is a specialized alternative to the poll-based
1749 * event API described in the Event interface section.
1750 *
1751 * @remark The \p event_cb will be called spontaneously from a background
1752 * thread completely managed by librdkafka.
1753 * Take care to perform proper locking of application objects.
1754 *
1755 * @warning The application MUST NOT call rd_kafka_destroy() from the
1756 * event callback.
1757 *
1758 * @sa rd_kafka_queue_get_background
1759 */
1760 RD_EXPORT void rd_kafka_conf_set_background_event_cb(
1761 rd_kafka_conf_t *conf,
1762 void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
1763
1764
1765 /**
1766 * @deprecated See rd_kafka_conf_set_dr_msg_cb()
1767 */
1768 RD_EXPORT
1769 void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf,
1770 void (*dr_cb)(rd_kafka_t *rk,
1771 void *payload,
1772 size_t len,
1773 rd_kafka_resp_err_t err,
1774 void *opaque,
1775 void *msg_opaque));
1776
1777 /**
1778 * @brief \b Producer: Set delivery report callback in provided \p conf object.
1779 *
1780 * The delivery report callback will be called once for each message
1781 * accepted by rd_kafka_produce() (et.al) with \p err set to indicate
1782 * the result of the produce request.
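 *
 * For illustration only (not from the original header), registration is
 * a single call, where \c my_dr_cb is the hypothetical callback sketched
 * for rd_kafka_msg_status_t above:
 * @code
 * rd_kafka_conf_set_dr_msg_cb(conf, my_dr_cb);
 * rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
 * ...
 * while (run) // serve queued delivery reports
 *         rd_kafka_poll(rk, 100);
 * @endcode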
1783 *
1784 * The callback is called when a message is successfully produced or
1785 * if librdkafka encountered a permanent failure.
1786 * Delivery errors occur when the retry count is exceeded, when the
1787 * message.timeout.ms timeout is exceeded or there is a permanent error
1788 * like RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART.
1789 *
1790 * An application must call rd_kafka_poll() at regular intervals to
1791 * serve queued delivery report callbacks.
1792 *
1793 * The broker-assigned offset can be retrieved with \c rkmessage->offset
1794 * and the timestamp can be retrieved using rd_kafka_message_timestamp().
1795 *
1796 * The \p dr_msg_cb \c opaque argument is the opaque set with
1797 * rd_kafka_conf_set_opaque().
1798 * The per-message msg_opaque value is available in
1799 * \c rd_kafka_message_t._private.
1800 *
1801 * @remark The Idempotent Producer may return an invalid timestamp
1802 * (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE), and
1803 * offset (RD_KAFKA_OFFSET_INVALID) for retried messages
1804 * that were previously successfully delivered but not properly
1805 * acknowledged.
1806 */
1807 RD_EXPORT
1808 void rd_kafka_conf_set_dr_msg_cb(
1809 rd_kafka_conf_t *conf,
1810 void (*dr_msg_cb)(rd_kafka_t *rk,
1811 const rd_kafka_message_t *rkmessage,
1812 void *opaque));
1813
1814
1815 /**
1816 * @brief \b Consumer: Set consume callback for use with
1817 * rd_kafka_consumer_poll()
1818 *
1819 * The \p consume_cb \c opaque argument is the opaque set with
1820 * rd_kafka_conf_set_opaque().
1821 */
1822 RD_EXPORT
1823 void rd_kafka_conf_set_consume_cb(
1824 rd_kafka_conf_t *conf,
1825 void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
1826
1827 /**
1828 * @brief \b Consumer: Set rebalance callback for use with
1829 * coordinated consumer group balancing.
1830 *
1831 * The \p err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
1832 * or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions'
1833 * contains the full partition set that was either assigned or revoked.
1834 *
1835 * Registering a \p rebalance_cb turns off librdkafka's automatic
1836 * partition assignment/revocation and instead delegates that responsibility
1837 * to the application's \p rebalance_cb.
1838 *
1839 * The rebalance callback is responsible for updating librdkafka's
1840 * assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
1841 * and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle
1842 * arbitrary rebalancing failures where \p err is neither of those.
1843 * @remark In this latter case (arbitrary error), the application must
1844 * call rd_kafka_assign(rk, NULL) to synchronize state.
1845 *
1846 * For eager/non-cooperative `partition.assignment.strategy` assignors,
1847 * such as `range` and `roundrobin`, the application must use
1848 * rd_kafka_assign() to set or clear the entire assignment.
1849 * For the cooperative assignors, such as `cooperative-sticky`, the application
1850 * must use rd_kafka_incremental_assign() for
1851 * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign()
1852 * for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS.
1853 *
1854 * Without a rebalance callback this is done automatically by librdkafka
1855 * but registering a rebalance callback gives the application flexibility
1856 * in performing other operations along with the assigning/revocation,
1857 * such as fetching offsets from an alternate location (on assign)
1858 * or manually committing offsets (on revoke).
1859 *
1860 * rebalance_cb is always triggered exactly once when a rebalance completes
1861 * with a new assignment, even if that assignment is empty. If an
1862 * eager/non-cooperative assignor is configured, there will eventually be
1863 * exactly one corresponding call to rebalance_cb to revoke these partitions
1864 * (even if empty), whether this is due to a group rebalance or lost
1865 * partitions. In the cooperative case, rebalance_cb will never be called if
1866 * the set of partitions being revoked is empty (whether or not lost).
1867 *
1868 * The callback's \p opaque argument is the opaque set with
1869 * rd_kafka_conf_set_opaque().
1870 *
1871 * @remark The \p partitions list is destroyed by librdkafka on return
1872 * from the rebalance_cb and must not be freed or
1873 * saved by the application.
1874 *
1875 * @remark Be careful when modifying the \p partitions list.
1876 * Changing this list should only be done to change the initial
1877 * offsets for each partition.
1878 * But a function like `rd_kafka_position()` might have unexpected
1879 * effects for instance when a consumer gets assigned a partition
1880 * it used to consume at an earlier rebalance. In this case, the
1881 * list of partitions will be updated with the old offset for that
1882 * partition. In this case, it is generally better to pass a copy
1883 * of the list (see `rd_kafka_topic_partition_list_copy()`).
1884 * The result of `rd_kafka_position()` is typically outdated in
1885 * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS.
1886 *
1887 * @sa rd_kafka_assign()
1888 * @sa rd_kafka_incremental_assign()
1889 * @sa rd_kafka_incremental_unassign()
1890 * @sa rd_kafka_assignment_lost()
1891 * @sa rd_kafka_rebalance_protocol()
1892 *
1893 * The following example shows the application's responsibilities:
1894 * @code
1895 * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
1896 * rd_kafka_topic_partition_list_t *partitions,
1897 * void *opaque) {
1898 *
1899 * switch (err)
1900 * {
1901 * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
1902 * // application may load offsets from arbitrary external
1903 * // storage here and update \p partitions
1904 * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
1905 * rd_kafka_incremental_assign(rk, partitions);
1906 * else // EAGER
1907 * rd_kafka_assign(rk, partitions);
1908 * break;
1909 *
1910 * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
1911 * if (manual_commits) // Optional explicit manual commit
1912 * rd_kafka_commit(rk, partitions, 0); // sync commit
1913 *
1914 * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
1915 * rd_kafka_incremental_unassign(rk, partitions);
1916 * else // EAGER
1917 * rd_kafka_assign(rk, NULL);
1918 * break;
1919 *
1920 * default:
1921 * handle_unlikely_error(err);
1922 * rd_kafka_assign(rk, NULL); // sync state
1923 * break;
1924 * }
1925 * }
1926 * @endcode
1927 *
1928 * @remark The above example lacks error handling for assign calls, see
1929 * the examples/ directory.
1930 */
1931 RD_EXPORT
1932 void rd_kafka_conf_set_rebalance_cb(
1933 rd_kafka_conf_t *conf,
1934 void (*rebalance_cb)(rd_kafka_t *rk,
1935 rd_kafka_resp_err_t err,
1936 rd_kafka_topic_partition_list_t *partitions,
1937 void *opaque));
1938
1939
1940
1941 /**
1942 * @brief \b Consumer: Set offset commit callback for use with consumer groups.
1943 *
1944 * The results of automatic or manual offset commits will be scheduled
1945 * for this callback and served by rd_kafka_consumer_poll().
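 *
 * As a hedged sketch (not part of the original header), such a callback
 * might log per-partition commit results; \c my_commit_cb is a
 * hypothetical name:
 * @code
 * static void my_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
 *                          rd_kafka_topic_partition_list_t *offsets,
 *                          void *opaque) {
 *         int i;
 *         if (err == RD_KAFKA_RESP_ERR__NO_OFFSET)
 *                 return; // nothing to commit: not an error
 *         for (i = 0; i < offsets->cnt; i++)
 *                 fprintf(stderr, "%s [%d] offset %lld: %s\n",
 *                         offsets->elems[i].topic,
 *                         (int)offsets->elems[i].partition,
 *                         (long long)offsets->elems[i].offset,
 *                         rd_kafka_err2str(offsets->elems[i].err));
 * }
 * @endcode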
1946 *
1947 * If no partitions had valid offsets to commit this callback will be called
1948 * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered
1949 * an error.
1950 *
1951 * The \p offsets list contains per-partition information:
1952 * - \c offset: committed offset (attempted)
1953 * - \c err: commit error
1954 *
1955 * The callback's \p opaque argument is the opaque set with
1956 * rd_kafka_conf_set_opaque().
1957 */
1958 RD_EXPORT
1959 void rd_kafka_conf_set_offset_commit_cb(
1960 rd_kafka_conf_t *conf,
1961 void (*offset_commit_cb)(rd_kafka_t *rk,
1962 rd_kafka_resp_err_t err,
1963 rd_kafka_topic_partition_list_t *offsets,
1964 void *opaque));
1965
1966
1967 /**
1968 * @brief Set error callback in provided conf object.
1969 *
1970 * The error callback is used by librdkafka to signal warnings and errors
1971 * back to the application.
1972 *
1973 * These errors should generally be considered informational and non-permanent,
1974 * the client will try to recover automatically from all types of errors.
1975 * Given that the client and cluster configuration is correct the
1976 * application should treat these as temporary errors.
1977 *
1978 * \p error_cb will be triggered with \c err set to RD_KAFKA_RESP_ERR__FATAL
1979 * if a fatal error has been raised; in this case use rd_kafka_fatal_error() to
1980 * retrieve the fatal error code and error string, and then begin terminating
1981 * the client instance.
1982 *
1983 * If no \p error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set
1984 * with rd_kafka_conf_set_events(), then the errors will be logged instead.
1985 *
1986 * The callback's \p opaque argument is the opaque set with
1987 * rd_kafka_conf_set_opaque().
1988 */
1989 RD_EXPORT
1990 void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf,
1991 void (*error_cb)(rd_kafka_t *rk,
1992 int err,
1993 const char *reason,
1994 void *opaque));
1995
1996 /**
1997 * @brief Set throttle callback.
1998 *
1999 * The throttle callback is used to forward broker throttle times to the
2000 * application for Produce and Fetch (consume) requests.
2001 *
2002 * Callbacks are triggered whenever a non-zero throttle time is returned by
2003 * the broker, or when the throttle time drops back to zero.
2004 *
2005 * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at
2006 * regular intervals to serve queued callbacks.
2007 *
2008 * The callback's \p opaque argument is the opaque set with
2009 * rd_kafka_conf_set_opaque().
2010 *
2011 * @remark Requires broker version 0.9.0 or later.
2012 */
2013 RD_EXPORT
2014 void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf,
2015 void (*throttle_cb)(rd_kafka_t *rk,
2016 const char *broker_name,
2017 int32_t broker_id,
2018 int throttle_time_ms,
2019 void *opaque));
2020
2021
2022 /**
2023 * @brief Set logger callback.
2024 *
2025 * The default is to print to stderr, but a syslog logger is also available,
2026 * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives.
2027 * Alternatively the application may provide its own logger callback.
2028 * Or pass \p func as NULL to disable logging.
2029 *
2030 * This is the configuration alternative to the deprecated rd_kafka_set_logger().
2031 *
2032 * @remark The log_cb will be called spontaneously from librdkafka's internal
2033 * threads unless logs have been forwarded to a poll queue through
2034 * \c rd_kafka_set_log_queue().
2035 * An application MUST NOT call any librdkafka APIs or do any prolonged
2036 * work in a non-forwarded \c log_cb.
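 *
 * As an illustrative sketch (not part of the original header), a minimal
 * logger matching this signature; \c my_log_cb is a hypothetical name:
 * @code
 * static void my_log_cb(const rd_kafka_t *rk, int level,
 *                       const char *fac, const char *buf) {
 *         fprintf(stderr, "LOG-%d %s %s: %s\n",
 *                 level, fac, rd_kafka_name(rk), buf);
 * }
 *
 * rd_kafka_conf_set_log_cb(conf, my_log_cb);
 * @endcode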
2037 */ 2038 RD_EXPORT 2039 void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, 2040 void (*log_cb)(const rd_kafka_t *rk, 2041 int level, 2042 const char *fac, 2043 const char *buf)); 2044 2045 2046 /** 2047 * @brief Set statistics callback in provided conf object. 2048 * 2049 * The statistics callback is triggered from rd_kafka_poll() every 2050 * \c statistics.interval.ms (needs to be configured separately). 2051 * Function arguments: 2052 * - \p rk - Kafka handle 2053 * - \p json - String containing the statistics data in JSON format 2054 * - \p json_len - Length of \p json string. 2055 * - \p opaque - Application-provided opaque as set by 2056 * rd_kafka_conf_set_opaque(). 2057 * 2058 * For more information on the format of \p json, see 2059 * https://github.com/edenhill/librdkafka/wiki/Statistics 2060 * 2061 * If the application wishes to hold on to the \p json pointer and free 2062 * it at a later time it must return 1 from the \p stats_cb. 2063 * If the application returns 0 from the \p stats_cb then librdkafka 2064 * will immediately free the \p json pointer. 2065 * 2066 * See STATISTICS.md for a full definition of the JSON object. 2067 */ 2068 RD_EXPORT 2069 void rd_kafka_conf_set_stats_cb( 2070 rd_kafka_conf_t *conf, 2071 int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)); 2072 2073 /** 2074 * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object. 2075 * 2076 * @param conf the configuration to mutate. 2077 * @param oauthbearer_token_refresh_cb the callback to set; callback function 2078 * arguments:<br> 2079 * \p rk - Kafka handle<br> 2080 * \p oauthbearer_config - Value of configuration property 2081 * sasl.oauthbearer.config. 2082 * \p opaque - Application-provided opaque set via 2083 * rd_kafka_conf_set_opaque() 2084 * 2085 * The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll() 2086 * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, 2087 * typically based on the configuration defined in \c sasl.oauthbearer.config. 2088 * 2089 * The callback should invoke rd_kafka_oauthbearer_set_token() 2090 * or rd_kafka_oauthbearer_set_token_failure() to indicate success 2091 * or failure, respectively. 2092 * 2093 * The refresh operation is eventable and may be received via 2094 * rd_kafka_queue_poll() with an event type of 2095 * \c RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH. 2096 * 2097 * Note that before any SASL/OAUTHBEARER broker connection can succeed the 2098 * application must call rd_kafka_oauthbearer_set_token() once -- either 2099 * directly or, more typically, by invoking either rd_kafka_poll(), 2100 * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause 2101 * retrieval of an initial token to occur. 2102 * 2103 * Alternatively, the application can enable the SASL queue by calling 2104 * rd_kafka_conf_enable_sasl_queue() on the configuration object prior to 2105 * creating the client instance, get the SASL queue with 2106 * rd_kafka_queue_get_sasl(), and either serve the queue manually by calling 2107 * rd_kafka_queue_poll(), or redirecting the queue to the background thread to 2108 * have the queue served automatically. For the latter case the SASL queue 2109 * must be forwarded to the background queue with rd_kafka_queue_forward(). 2110 * A convenience function is available to automatically forward the SASL queue 2111 * to librdkafka's background thread, see 2112 * rd_kafka_sasl_background_callbacks_enable(). 
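 *
 * As an illustrative sketch (not from the original header), a refresh
 * callback typically acquires a token from the application's identity
 * provider and hands it to librdkafka; \c acquire_token() and
 * \c token_expiry_ms() are hypothetical helpers:
 * @code
 * static void my_refresh_cb(rd_kafka_t *rk,
 *                           const char *oauthbearer_config,
 *                           void *opaque) {
 *         char errstr[512];
 *         const char *token = acquire_token();    // hypothetical
 *         int64_t expiry_ms = token_expiry_ms();  // hypothetical
 *         if (!token) {
 *                 rd_kafka_oauthbearer_set_token_failure(
 *                         rk, "token acquisition failed");
 *                 return;
 *         }
 *         if (rd_kafka_oauthbearer_set_token(rk, token, expiry_ms,
 *                                            "principal", NULL, 0, errstr,
 *                                            sizeof(errstr)))
 *                 rd_kafka_oauthbearer_set_token_failure(rk, errstr);
 * }
 * @endcode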
2113 * 2114 * An unsecured JWT refresh handler is provided by librdkafka for development 2115 * and testing purposes, it is enabled by setting 2116 * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is 2117 * mutually exclusive to using a refresh callback. 2118 * 2119 * @sa rd_kafka_sasl_background_callbacks_enable() 2120 * @sa rd_kafka_queue_get_sasl() 2121 */ 2122 RD_EXPORT 2123 void rd_kafka_conf_set_oauthbearer_token_refresh_cb( 2124 rd_kafka_conf_t *conf, 2125 void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, 2126 const char *oauthbearer_config, 2127 void *opaque)); 2128 2129 /** 2130 * @brief Enable/disable creation of a queue specific to SASL events 2131 * and callbacks. 2132 * 2133 * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this 2134 * configuration API allows an application to get a dedicated 2135 * queue for the SASL events/callbacks. After enabling the queue with this API 2136 * the application can retrieve the queue by calling 2137 * rd_kafka_queue_get_sasl() on the client instance. 2138 * This queue may then be served directly by the application 2139 * (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as 2140 * the background queue. 2141 * 2142 * A convenience function is available to automatically forward the SASL queue 2143 * to librdkafka's background thread, see 2144 * rd_kafka_sasl_background_callbacks_enable(). 2145 * 2146 * By default (\p enable = 0) the main queue (as served by rd_kafka_poll(), 2147 * et.al.) is used for SASL callbacks. 2148 * 2149 * @remark The SASL queue is currently only used by the SASL OAUTHBEARER 2150 * mechanism's token_refresh_cb(). 2151 * 2152 * @sa rd_kafka_queue_get_sasl() 2153 * @sa rd_kafka_sasl_background_callbacks_enable() 2154 */ 2155 2156 RD_EXPORT 2157 void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable); 2158 2159 2160 /** 2161 * @brief Set socket callback. 2162 * 2163 * The socket callback is responsible for opening a socket 2164 * according to the supplied \p domain, \p type and \p protocol. 2165 * The socket shall be created with \c CLOEXEC set in a racefree fashion, if 2166 * possible. 2167 * 2168 * The callback's \p opaque argument is the opaque set with 2169 * rd_kafka_conf_set_opaque(). 2170 * 2171 * Default: 2172 * - on linux: racefree CLOEXEC 2173 * - others : non-racefree CLOEXEC 2174 * 2175 * @remark The callback will be called from an internal librdkafka thread. 2176 */ 2177 RD_EXPORT 2178 void rd_kafka_conf_set_socket_cb( 2179 rd_kafka_conf_t *conf, 2180 int (*socket_cb)(int domain, int type, int protocol, void *opaque)); 2181 2182 2183 2184 /** 2185 * @brief Set connect callback. 2186 * 2187 * The connect callback is responsible for connecting socket \p sockfd 2188 * to peer address \p addr. 2189 * The \p id field contains the broker identifier. 2190 * 2191 * \p connect_cb shall return 0 on success (socket connected) or an error 2192 * number (errno) on error. 2193 * 2194 * The callback's \p opaque argument is the opaque set with 2195 * rd_kafka_conf_set_opaque(). 2196 * 2197 * @remark The callback will be called from an internal librdkafka thread. 2198 */ 2199 RD_EXPORT void 2200 rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, 2201 int (*connect_cb)(int sockfd, 2202 const struct sockaddr *addr, 2203 int addrlen, 2204 const char *id, 2205 void *opaque)); 2206 2207 /** 2208 * @brief Set close socket callback. 2209 * 2210 * Close a socket (optionally opened with socket_cb()). 
2211 *
2212 * The callback's \p opaque argument is the opaque set with
2213 * rd_kafka_conf_set_opaque().
2214 *
2215 * @remark The callback will be called from an internal librdkafka thread.
2216 */
2217 RD_EXPORT void rd_kafka_conf_set_closesocket_cb(
2218 rd_kafka_conf_t *conf,
2219 int (*closesocket_cb)(int sockfd, void *opaque));
2220
2221
2222
2223 #ifndef _WIN32
2224 /**
2225 * @brief Set open callback.
2226 *
2227 * The open callback is responsible for opening the file specified by
2228 * pathname, flags and mode.
2229 * The file shall be opened with \c CLOEXEC set in a racefree fashion, if
2230 * possible.
2231 *
2232 * Default:
2233 * - on linux: racefree CLOEXEC
2234 * - others : non-racefree CLOEXEC
2235 *
2236 * The callback's \p opaque argument is the opaque set with
2237 * rd_kafka_conf_set_opaque().
2238 *
2239 * @remark The callback will be called from an internal librdkafka thread.
2240 */
2241 RD_EXPORT
2242 void rd_kafka_conf_set_open_cb(
2243 rd_kafka_conf_t *conf,
2244 int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque));
2245 #endif
2246
2247
2248 /**
2249 * @brief Sets the verification callback of the broker certificate
2250 *
2251 * The verification callback is triggered from internal librdkafka threads
2252 * upon connecting to a broker. On each connection attempt the callback
2253 * will be called for each certificate in the broker's certificate chain,
2254 * starting at the root certificate, as long as the application callback
2255 * returns 1 (valid certificate).
2256 * \c broker_name and \c broker_id correspond to the broker the connection
2257 * is being made to.
2258 * The \c x509_error argument indicates if OpenSSL's verification of
2259 * the certificate succeeded (0) or failed (an OpenSSL error code).
2260 * The application may set the SSL context error code by returning 0
2261 * from the verify callback and providing a non-zero SSL context error code
2262 * in \c x509_error.
2263 * If the verify callback sets \c x509_error to 0, returns 1, and the
2264 * original \c x509_error was non-zero, the error on the SSL context will
2265 * be cleared.
2266 * \c x509_error is always a valid pointer to an int.
2267 *
2268 * \c depth is the depth of the current certificate in the chain, starting
2269 * at the root certificate.
2270 *
2271 * The certificate itself is passed in binary DER format in \c buf of
2272 * size \c size.
2273 *
2274 * The callback must return 1 if verification succeeds, or
2275 * 0 if verification fails and then write a human-readable error message
2276 * to \c errstr (limited to \c errstr_size bytes, including nul-term).
2277 *
2278 * The callback's \p opaque argument is the opaque set with
2279 * rd_kafka_conf_set_opaque().
2280 *
2281 * @returns RD_KAFKA_CONF_OK if SSL is supported in this build, else
2282 * RD_KAFKA_CONF_INVALID.
2283 *
2284 * @warning This callback will be called from internal librdkafka threads.
2285 *
2286 * @remark See <openssl/x509_vfy.h> in the OpenSSL source distribution
2287 * for a list of \p x509_error codes.
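 *
 * As an illustrative sketch (not part of the original header), a
 * callback that simply defers to OpenSSL's verdict; \c my_verify_cb is
 * a hypothetical name:
 * @code
 * static int my_verify_cb(rd_kafka_t *rk, const char *broker_name,
 *                         int32_t broker_id, int *x509_error, int depth,
 *                         const char *buf, size_t size,
 *                         char *errstr, size_t errstr_size, void *opaque) {
 *         if (*x509_error != 0) {
 *                 snprintf(errstr, errstr_size,
 *                          "OpenSSL verification failed: %d", *x509_error);
 *                 return 0; // verification failed
 *         }
 *         return 1; // certificate accepted
 * }
 * @endcode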
2288 */
2289 RD_EXPORT
2290 rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(
2291 rd_kafka_conf_t *conf,
2292 int (*ssl_cert_verify_cb)(rd_kafka_t *rk,
2293 const char *broker_name,
2294 int32_t broker_id,
2295 int *x509_error,
2296 int depth,
2297 const char *buf,
2298 size_t size,
2299 char *errstr,
2300 size_t errstr_size,
2301 void *opaque));
2302
2303
2304 /**
2305 * @enum rd_kafka_cert_type_t
2306 *
2307 * @brief SSL certificate type
2308 *
2309 * @sa rd_kafka_conf_set_ssl_cert
2310 */
2311 typedef enum rd_kafka_cert_type_t {
2312 RD_KAFKA_CERT_PUBLIC_KEY, /**< Client's public key */
2313 RD_KAFKA_CERT_PRIVATE_KEY, /**< Client's private key */
2314 RD_KAFKA_CERT_CA, /**< CA certificate */
2315 RD_KAFKA_CERT__CNT,
2316 } rd_kafka_cert_type_t;
2317
2318 /**
2319 * @enum rd_kafka_cert_enc_t
2320 *
2321 * @brief SSL certificate encoding
2322 *
2323 * @sa rd_kafka_conf_set_ssl_cert
2324 */
2325 typedef enum rd_kafka_cert_enc_t {
2326 RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */
2327 RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */
2328 RD_KAFKA_CERT_ENC_PEM, /**< PEM */
2329 RD_KAFKA_CERT_ENC__CNT,
2330 } rd_kafka_cert_enc_t;
2331
2332
2333 /**
2334 * @brief Set certificate/key \p cert_type from the \p cert_enc encoded
2335 * memory at \p buffer of \p size bytes.
2336 *
2337 * @param conf Configuration object.
2338 * @param cert_type Certificate or key type to configure.
2339 * @param cert_enc Encoding type of \p buffer.
2340 * @param buffer Memory pointer to encoded certificate or key.
2341 * The memory is not referenced after this function returns.
2342 * @param size Size of memory at \p buffer.
2343 * @param errstr Memory where a human-readable error string will be written
2344 * on failure.
2345 * @param errstr_size Size of \p errstr, including space for nul-terminator.
2346 *
2347 * @returns RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the
2348 * memory in \p buffer is of incorrect encoding, or if librdkafka
2349 * was not built with SSL support.
2350 *
2351 * @remark Calling this method multiple times with the same \p cert_type
2352 * will replace the previous value.
2353 *
2354 * @remark Calling this method with \p buffer set to NULL will clear the
2355 * configuration for \p cert_type.
2356 *
2357 * @remark The private key may require a password, which must be specified
2358 * with the `ssl.key.password` configuration property prior to
2359 * calling this function.
2360 *
2361 * @remark Private and public keys in PEM format may also be set with the
2362 * `ssl.key.pem` and `ssl.certificate.pem` configuration properties.
2363 *
2364 * @remark CA certificate in PEM format may also be set with the
2365 * `ssl.ca.pem` configuration property.
2366 */
2367 RD_EXPORT rd_kafka_conf_res_t
2368 rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf,
2369 rd_kafka_cert_type_t cert_type,
2370 rd_kafka_cert_enc_t cert_enc,
2371 const void *buffer,
2372 size_t size,
2373 char *errstr,
2374 size_t errstr_size);
2375
2376
2377 /**
2378 * @brief Set callback_data for OpenSSL engine.
2379 *
2380 * @param conf Configuration object.
2381 * @param callback_data passed to engine callbacks,
2382 * e.g. \c ENGINE_load_ssl_client_cert.
2383 *
2384 * @remark The \c ssl.engine.location configuration must be set for this
2385 * to have effect.
2386 *
2387 * @remark The memory pointed to by \p callback_data must remain valid for the
2388 * lifetime of the configuration object and any Kafka clients that
2389 * use it.
2390 */ 2391 RD_EXPORT 2392 void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, 2393 void *callback_data); 2394 2395 2396 /** 2397 * @brief Sets the application's opaque pointer that will be passed to callbacks 2398 * 2399 * @sa rd_kafka_opaque() 2400 */ 2401 RD_EXPORT 2402 void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque); 2403 2404 /** 2405 * @brief Retrieves the opaque pointer previously set 2406 * with rd_kafka_conf_set_opaque() 2407 */ 2408 RD_EXPORT 2409 void *rd_kafka_opaque(const rd_kafka_t *rk); 2410 2411 2412 2413 /** 2414 * @brief Sets the default topic configuration to use for automatically 2415 * subscribed topics (e.g., through pattern-matched topics). 2416 * The topic config object is not usable after this call. 2417 * 2418 * @warning Any topic configuration settings that have been set on the 2419 * global rd_kafka_conf_t object will be overwritten by this call 2420 * since the implicitly created default topic config object is 2421 * replaced by the user-supplied one. 2422 * 2423 * @deprecated Set default topic level configuration on the 2424 * global rd_kafka_conf_t object instead. 2425 */ 2426 RD_EXPORT 2427 void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, 2428 rd_kafka_topic_conf_t *tconf); 2429 2430 /** 2431 * @brief Gets the default topic configuration as previously set with 2432 * rd_kafka_conf_set_default_topic_conf() or that was implicitly created 2433 * by configuring a topic-level property on the global \p conf object. 2434 * 2435 * @returns the \p conf's default topic configuration (if any), or NULL. 2436 * 2437 * @warning The returned topic configuration object is owned by the \p conf 2438 * object. It may be modified but not destroyed and its lifetime is 2439 * the same as the \p conf object or the next call to 2440 * rd_kafka_conf_set_default_topic_conf(). 2441 */ 2442 RD_EXPORT rd_kafka_topic_conf_t * 2443 rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf); 2444 2445 2446 /** 2447 * @brief Retrieve configuration value for property \p name. 2448 * 2449 * If \p dest is non-NULL the value will be written to \p dest with at 2450 * most \p dest_size. 2451 * 2452 * \p *dest_size is updated to the full length of the value, thus if 2453 * \p *dest_size initially is smaller than the full length the application 2454 * may reallocate \p dest to fit the returned \p *dest_size and try again. 2455 * 2456 * If \p dest is NULL only the full length of the value is returned. 2457 * 2458 * Fallthrough: 2459 * Topic-level configuration properties from the \c default_topic_conf 2460 * may be retrieved using this interface. 2461 * 2462 * @returns \p RD_KAFKA_CONF_OK if the property name matched, else 2463 * \p RD_KAFKA_CONF_UNKNOWN. 2464 */ 2465 RD_EXPORT 2466 rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, 2467 const char *name, 2468 char *dest, 2469 size_t *dest_size); 2470 2471 2472 /** 2473 * @brief Retrieve topic configuration value for property \p name. 2474 * 2475 * @sa rd_kafka_conf_get() 2476 */ 2477 RD_EXPORT 2478 rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, 2479 const char *name, 2480 char *dest, 2481 size_t *dest_size); 2482 2483 2484 /** 2485 * @brief Dump the configuration properties and values of \p conf to an array 2486 * with \"key\", \"value\" pairs. 2487 * 2488 * The number of entries in the array is returned in \p *cntp. 2489 * 2490 * The dump must be freed with `rd_kafka_conf_dump_free()`. 
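 *
 * For illustration (not part of the original header), iterating the dump
 * is a matter of stepping through the array in key/value pairs:
 * @code
 * size_t cnt, i;
 * const char **d = rd_kafka_conf_dump(conf, &cnt);
 * for (i = 0; i + 1 < cnt; i += 2)
 *         printf("%s = %s\n", d[i], d[i + 1]);
 * rd_kafka_conf_dump_free(d, cnt);
 * @endcode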
2491 */
2492 RD_EXPORT
2493 const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
2494
2495
2496 /**
2497 * @brief Dump the topic configuration properties and values of \p conf
2498 * to an array with \"key\", \"value\" pairs.
2499 *
2500 * The number of entries in the array is returned in \p *cntp.
2501 *
2502 * The dump must be freed with `rd_kafka_conf_dump_free()`.
2503 */
2504 RD_EXPORT
2505 const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf,
2506 size_t *cntp);
2507
2508 /**
2509 * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or
2510 * `rd_kafka_topic_conf_dump()`.
2511 */
2512 RD_EXPORT
2513 void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
2514
2515 /**
2516 * @brief Prints a table to \p fp of all supported configuration properties,
2517 * their default values as well as a description.
2518 *
2519 * @remark All properties and values are shown, even those
2520 * that have been disabled at build time due to missing dependencies.
2521 */
2522 RD_EXPORT
2523 void rd_kafka_conf_properties_show(FILE *fp);
2524
2525 /**@}*/
2526
2527
2528 /**
2529 * @name Topic configuration
2530 * @{
2531 *
2532 * @brief Topic configuration property interface
2533 *
2534 */
2535
2536
2537 /**
2538 * @brief Create topic configuration object
2539 *
2540 * @sa Same semantics as for rd_kafka_conf_new().
2541 */
2542 RD_EXPORT
2543 rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
2544
2545
2546 /**
2547 * @brief Creates a copy/duplicate of topic configuration object \p conf.
2548 */
2549 RD_EXPORT
2550 rd_kafka_topic_conf_t *
2551 rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
2552
2553 /**
2554 * @brief Creates a copy/duplicate of \p rk 's default topic configuration
2555 * object.
2556 */
2557 RD_EXPORT
2558 rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
2559
2560
2561 /**
2562 * @brief Destroys a topic conf object.
2563 */
2564 RD_EXPORT
2565 void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
2566
2567
2568 /**
2569 * @brief Sets a single rd_kafka_topic_conf_t value by property name.
2570 *
2571 * \p topic_conf should have been previously set up
2572 * with `rd_kafka_topic_conf_new()`.
2573 *
2574 * @returns rd_kafka_conf_res_t to indicate success or failure.
2575 */
2576 RD_EXPORT
2577 rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf,
2578 const char *name,
2579 const char *value,
2580 char *errstr,
2581 size_t errstr_size);
2582
2583 /**
2584 * @brief Sets the application's opaque pointer that will be passed to all topic
2585 * callbacks as the \c rkt_opaque argument.
2586 *
2587 * @sa rd_kafka_topic_opaque()
2588 */
2589 RD_EXPORT
2590 void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf,
2591 void *rkt_opaque);
2592
2593
2594 /**
2595 * @brief \b Producer: Set partitioner callback in provided topic conf object.
2596 *
2597 * The partitioner may be called in any thread at any time,
2598 * it may be called multiple times for the same message/key.
2599 *
2600 * The callback's \p rkt_opaque argument is the opaque set by
2601 * rd_kafka_topic_conf_set_opaque().
2602 * The callback's \p msg_opaque argument is the per-message opaque
2603 * passed to produce().
2604 *
2605 * Partitioner function constraints:
2606 * - MUST NOT call any rd_kafka_*() functions except:
2607 * rd_kafka_topic_partition_available()
2608 * - MUST NOT block or execute for prolonged periods of time.
2609 * - MUST return a value between 0 and partition_cnt-1, or the 2610 * special \c RD_KAFKA_PARTITION_UA value if partitioning 2611 * could not be performed. 2612 */ 2613 RD_EXPORT 2614 void rd_kafka_topic_conf_set_partitioner_cb( 2615 rd_kafka_topic_conf_t *topic_conf, 2616 int32_t (*partitioner)(const rd_kafka_topic_t *rkt, 2617 const void *keydata, 2618 size_t keylen, 2619 int32_t partition_cnt, 2620 void *rkt_opaque, 2621 void *msg_opaque)); 2622 2623 2624 /** 2625 * @brief \b Producer: Set message queueing order comparator callback. 2626 * 2627 * The callback may be called in any thread at any time, 2628 * it may be called multiple times for the same message. 2629 * 2630 * Ordering comparator function constraints: 2631 * - MUST be stable sort (same input gives same output). 2632 * - MUST NOT call any rd_kafka_*() functions. 2633 * - MUST NOT block or execute for prolonged periods of time. 2634 * 2635 * The comparator shall compare the two messages and return: 2636 * - < 0 if message \p a should be inserted before message \p b. 2637 * - >=0 if message \p a should be inserted after message \p b. 2638 * 2639 * @remark Insert sorting will be used to enqueue the message in the 2640 * correct queue position, this comes at a cost of O(n). 2641 * 2642 * @remark If `queuing.strategy=fifo` new messages are enqueued to the 2643 * tail of the queue regardless of msg_order_cmp, but retried messages 2644 * are still affected by msg_order_cmp. 2645 * 2646 * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, 2647 * DO NOT USE IN PRODUCTION. 2648 */ 2649 RD_EXPORT void rd_kafka_topic_conf_set_msg_order_cmp( 2650 rd_kafka_topic_conf_t *topic_conf, 2651 int (*msg_order_cmp)(const rd_kafka_message_t *a, 2652 const rd_kafka_message_t *b)); 2653 2654 2655 /** 2656 * @brief Check if partition is available (has a leader broker). 2657 * 2658 * @returns 1 if the partition is available, else 0. 2659 * 2660 * @warning This function must only be called from inside a partitioner function 2661 */ 2662 RD_EXPORT 2663 int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, 2664 int32_t partition); 2665 2666 2667 /******************************************************************* 2668 * * 2669 * Partitioners provided by rdkafka * 2670 * * 2671 *******************************************************************/ 2672 2673 /** 2674 * @brief Random partitioner. 2675 * 2676 * Will try not to return unavailable partitions. 2677 * 2678 * The \p rkt_opaque argument is the opaque set by 2679 * rd_kafka_topic_conf_set_opaque(). 2680 * The \p msg_opaque argument is the per-message opaque 2681 * passed to produce(). 2682 * 2683 * @returns a random partition between 0 and \p partition_cnt - 1. 2684 * 2685 */ 2686 RD_EXPORT 2687 int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, 2688 const void *key, 2689 size_t keylen, 2690 int32_t partition_cnt, 2691 void *rkt_opaque, 2692 void *msg_opaque); 2693 2694 /** 2695 * @brief Consistent partitioner. 2696 * 2697 * Uses consistent hashing to map identical keys onto identical partitions. 2698 * 2699 * The \p rkt_opaque argument is the opaque set by 2700 * rd_kafka_topic_conf_set_opaque(). 2701 * The \p msg_opaque argument is the per-message opaque 2702 * passed to produce(). 
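 *
 * As an illustrative sketch (not part of the original header), any of the
 * partitioners in this section can be installed through the topic
 * configuration interface described above:
 * @code
 * rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
 * rd_kafka_topic_conf_set_partitioner_cb(
 *         tconf, rd_kafka_msg_partitioner_consistent);
 * @endcode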
2703 * 2704 * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on 2705 * the CRC value of the key 2706 */ 2707 RD_EXPORT 2708 int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, 2709 const void *key, 2710 size_t keylen, 2711 int32_t partition_cnt, 2712 void *rkt_opaque, 2713 void *msg_opaque); 2714 2715 /** 2716 * @brief Consistent-Random partitioner. 2717 * 2718 * This is the default partitioner. 2719 * Uses consistent hashing to map identical keys onto identical partitions, and 2720 * messages without keys will be assigned via the random partitioner. 2721 * 2722 * The \p rkt_opaque argument is the opaque set by 2723 * rd_kafka_topic_conf_set_opaque(). 2724 * The \p msg_opaque argument is the per-message opaque 2725 * passed to produce(). 2726 * 2727 * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on 2728 * the CRC value of the key (if provided) 2729 */ 2730 RD_EXPORT 2731 int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, 2732 const void *key, 2733 size_t keylen, 2734 int32_t partition_cnt, 2735 void *rkt_opaque, 2736 void *msg_opaque); 2737 2738 2739 /** 2740 * @brief Murmur2 partitioner (Java compatible). 2741 * 2742 * Uses consistent hashing to map identical keys onto identical partitions 2743 * using Java-compatible Murmur2 hashing. 2744 * 2745 * The \p rkt_opaque argument is the opaque set by 2746 * rd_kafka_topic_conf_set_opaque(). 2747 * The \p msg_opaque argument is the per-message opaque 2748 * passed to produce(). 2749 * 2750 * @returns a partition between 0 and \p partition_cnt - 1. 2751 */ 2752 RD_EXPORT 2753 int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, 2754 const void *key, 2755 size_t keylen, 2756 int32_t partition_cnt, 2757 void *rkt_opaque, 2758 void *msg_opaque); 2759 2760 /** 2761 * @brief Consistent-Random Murmur2 partitioner (Java compatible). 2762 * 2763 * Uses consistent hashing to map identical keys onto identical partitions 2764 * using Java-compatible Murmur2 hashing. 2765 * Messages without keys will be assigned via the random partitioner. 2766 * 2767 * The \p rkt_opaque argument is the opaque set by 2768 * rd_kafka_topic_conf_set_opaque(). 2769 * The \p msg_opaque argument is the per-message opaque 2770 * passed to produce(). 2771 * 2772 * @returns a partition between 0 and \p partition_cnt - 1. 2773 */ 2774 RD_EXPORT 2775 int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, 2776 const void *key, 2777 size_t keylen, 2778 int32_t partition_cnt, 2779 void *rkt_opaque, 2780 void *msg_opaque); 2781 2782 2783 /** 2784 * @brief FNV-1a partitioner. 2785 * 2786 * Uses consistent hashing to map identical keys onto identical partitions 2787 * using FNV-1a hashing. 2788 * 2789 * The \p rkt_opaque argument is the opaque set by 2790 * rd_kafka_topic_conf_set_opaque(). 2791 * The \p msg_opaque argument is the per-message opaque 2792 * passed to produce(). 2793 * 2794 * @returns a partition between 0 and \p partition_cnt - 1. 2795 */ 2796 RD_EXPORT 2797 int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, 2798 const void *key, 2799 size_t keylen, 2800 int32_t partition_cnt, 2801 void *rkt_opaque, 2802 void *msg_opaque); 2803 2804 2805 /** 2806 * @brief Consistent-Random FNV-1a partitioner. 2807 * 2808 * Uses consistent hashing to map identical keys onto identical partitions 2809 * using FNV-1a hashing. 2810 * Messages without keys will be assigned via the random partitioner. 
2811 *
2812 * The \p rkt_opaque argument is the opaque set by
2813 * rd_kafka_topic_conf_set_opaque().
2814 * The \p msg_opaque argument is the per-message opaque
2815 * passed to produce().
2816 *
2817 * @returns a partition between 0 and \p partition_cnt - 1.
2818 */
2819 RD_EXPORT
2820 int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt,
2821 const void *key,
2822 size_t keylen,
2823 int32_t partition_cnt,
2824 void *rkt_opaque,
2825 void *msg_opaque);
2826
2827
2828 /**@}*/
2829
2830
2831
2832 /**
2833 * @name Main Kafka and Topic object handles
2834 * @{
2835 *
2836 *
2837 */
2838
2839
2840
2841 /**
2842 * @brief Creates a new Kafka handle and starts its operation according to the
2843 * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER).
2844 *
2845 * \p conf is an optional struct created with `rd_kafka_conf_new()` that will
2846 * be used instead of the default configuration.
2847 * The \p conf object is freed by this function on success and must not be used
2848 * or destroyed by the application subsequently.
2849 * See `rd_kafka_conf_set()` et.al for more information.
2850 *
2851 * \p errstr must be a pointer to memory of at least size \p errstr_size where
2852 * `rd_kafka_new()` may write a human-readable error message in case the
2853 * creation of a new handle fails, in which case the function returns NULL.
2854 *
2855 * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER
2856 * rd_kafka_t handle is created it may either operate in the
2857 * legacy simple consumer mode using the rd_kafka_consume_start()
2858 * interface, or the High-level KafkaConsumer API.
2859 * @remark An application must only use one of these groups of APIs on a given
2860 * rd_kafka_t RD_KAFKA_CONSUMER handle.
2861 *
2862 *
2863 * @returns The Kafka handle on success or NULL on error (see \p errstr)
2864 *
2865 * @sa To destroy the Kafka handle, use rd_kafka_destroy().
2866 */
2867 RD_EXPORT
2868 rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
2869 rd_kafka_conf_t *conf,
2870 char *errstr,
2871 size_t errstr_size);
2872
2873
2874 /**
2875 * @brief Destroy Kafka handle.
2876 *
2877 * @remark This is a blocking operation.
2878 * @remark rd_kafka_consumer_close() will be called from this function
2879 * if the instance type is RD_KAFKA_CONSUMER, a \c group.id was
2880 * configured, and the rd_kafka_consumer_close() was not
2881 * explicitly called by the application. This in turn may
2882 * trigger consumer callbacks, such as rebalance_cb.
2883 * Use rd_kafka_destroy_flags() with
2884 * RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to avoid this behaviour.
2885 *
2886 * @sa rd_kafka_destroy_flags()
2887 */
2888 RD_EXPORT
2889 void rd_kafka_destroy(rd_kafka_t *rk);
2890
2891
2892 /**
2893 * @brief Destroy Kafka handle according to specified destroy flags
2894 *
2895 */
2896 RD_EXPORT
2897 void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
2898
2899 /**
2900 * @brief Flags for rd_kafka_destroy_flags()
2901 */
2902
2903 /*!
2904 * Don't call consumer_close() to leave group and commit final offsets.
2905 *
2906 * This also disables consumer callbacks to be called from rd_kafka_destroy*(),
2907 * such as rebalance_cb.
2908 *
2909 * The consumer group handler is still closed internally, but from an
2910 * application perspective none of the functionality from consumer_close()
2911 * is performed.
2912 */
2913 #define RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE 0x8
2914
2915
2916
2917 /**
2918 * @brief Returns Kafka handle name.
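 *
 * For context, a minimal sketch (not from the original header) of the
 * handle lifecycle described for rd_kafka_new() above:
 * @code
 * char errstr[512];
 * rd_kafka_t *rk =
 *         rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
 * if (!rk)
 *         fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
 * else
 *         printf("created client: %s\n", rd_kafka_name(rk));
 * @endcode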
2919 */
2920 RD_EXPORT
2921 const char *rd_kafka_name(const rd_kafka_t *rk);
2922
2923
2924 /**
2925 * @brief Returns Kafka handle type.
2926 */
2927 RD_EXPORT
2928 rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
2929
2930
2931 /**
2932 * @brief Returns this client's broker-assigned group member id.
2933 *
2934 * @remark This currently requires the high-level KafkaConsumer.
2935 *
2936 * @returns An allocated string containing the current broker-assigned group
2937 * member id, or NULL if not available.
2938 * The application must free the string with \p free() or
2939 * rd_kafka_mem_free().
2940 */
2941 RD_EXPORT
2942 char *rd_kafka_memberid(const rd_kafka_t *rk);
2943
2944
2945
2946 /**
2947 * @brief Returns the ClusterId as reported in broker metadata.
2948 *
2949 * @param rk Client instance.
2950 * @param timeout_ms If there is no cached value from metadata retrieval
2951 * then this specifies the maximum amount of time
2952 * (in milliseconds) the call will block waiting
2953 * for metadata to be retrieved.
2954 * Use 0 for non-blocking calls.
2955 *
2956 * @remark Requires broker version >=0.10.0 and api.version.request=true.
2957 *
2958 * @remark The application must free the returned pointer
2959 * using rd_kafka_mem_free().
2960 *
2961 * @returns a newly allocated string containing the ClusterId, or NULL
2962 * if no ClusterId could be retrieved in the allotted timespan.
2963 */
2964 RD_EXPORT
2965 char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
2966
2967
2968 /**
2969 * @brief Returns the current ControllerId as reported in broker metadata.
2970 *
2971 * @param rk Client instance.
2972 * @param timeout_ms If there is no cached value from metadata retrieval
2973 * then this specifies the maximum amount of time
2974 * (in milliseconds) the call will block waiting
2975 * for metadata to be retrieved.
2976 * Use 0 for non-blocking calls.
2977 *
2978 * @remark Requires broker version >=0.10.0 and api.version.request=true.
2979 *
2980 * @returns the controller broker id (>= 0), or -1 if no ControllerId could be
2981 * retrieved in the allotted timespan.
2982 */
2983 RD_EXPORT
2984 int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
2985
2986
2987 /**
2988 * @brief Creates a new topic handle for topic named \p topic.
2989 *
2990 * \p conf is an optional configuration for the topic created with
2991 * `rd_kafka_topic_conf_new()` that will be used instead of the default
2992 * topic configuration.
2993 * The \p conf object is freed by this function and must not be used or
2994 * destroyed by the application subsequently.
2995 * See `rd_kafka_topic_conf_set()` et.al for more information.
2996 *
2997 * Topic handles are refcounted internally and calling rd_kafka_topic_new()
2998 * again with the same topic name will return the previous topic handle
2999 * without updating the original handle's configuration.
3000 * Applications must eventually call rd_kafka_topic_destroy() for each
3001 * successful call to rd_kafka_topic_new() to free up resources.
3002 *
3003 * @returns the new topic handle or NULL on error (use rd_kafka_errno2err()
3004 * to convert system \p errno to an rd_kafka_resp_err_t error code).
3005 *
3006 * @sa rd_kafka_topic_destroy()
3007 */
3008 RD_EXPORT
3009 rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk,
3010 const char *topic,
3011 rd_kafka_topic_conf_t *conf);
3012
3013
3014
3015 /**
3016 * @brief Lose the application's topic handle refcount as previously created
3017 * with `rd_kafka_topic_new()`.
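 *
 * As a hedged sketch (not part of the original header), topic handles are
 * paired with rd_kafka_topic_new(); the topic name is hypothetical:
 * @code
 * rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "mytopic", NULL);
 * if (rkt) {
 *         ... // produce to or consume from the topic
 *         rd_kafka_topic_destroy(rkt);
 * }
 * @endcode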
3018 *
3019 * @remark Since topic objects are refcounted (both internally and for the app)
3020 * the topic object might not actually be destroyed by this call,
3021 * but the application must consider the object destroyed.
3022 */
3023 RD_EXPORT
3024 void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
3025
3026
3027 /**
3028 * @brief Returns the topic name.
3029 */
3030 RD_EXPORT
3031 const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
3032
3033
3034 /**
3035 * @brief Get the \p rkt_opaque pointer that was set in the topic configuration
3036 * with rd_kafka_topic_conf_set_opaque().
3037 */
3038 RD_EXPORT
3039 void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
3040
3041
3042 /**
3043 * @brief Unassigned partition.
3044 *
3045 * The unassigned partition is used by the producer API for messages
3046 * that should be partitioned using the configured or default partitioner.
3047 */
3048 #define RD_KAFKA_PARTITION_UA ((int32_t)-1)
3049
3050
3051 /**
3052 * @brief Polls the provided kafka handle for events.
3053 *
3054 * Events will cause application provided callbacks to be called.
3055 *
3056 * The \p timeout_ms argument specifies the maximum amount of time
3057 * (in milliseconds) that the call will block waiting for events.
3058 * For non-blocking calls, provide 0 as \p timeout_ms.
3059 * To wait indefinitely for an event, provide -1.
3060 *
3061 * @remark An application should make sure to call poll() at regular
3062 * intervals to serve any queued callbacks waiting to be called.
3063 * @remark If your producer doesn't have any callback set (in particular
3064 * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb)
3065 * you might choose not to call poll(), though this is not
3066 * recommended.
3067 *
3068 * Events:
3069 * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer]
3070 * - error callbacks (rd_kafka_conf_set_error_cb()) [all]
3071 * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all]
3072 * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all]
3073 * - OAUTHBEARER token refresh callbacks
3074 * (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all]
3075 *
3076 * @returns the number of events served.
3077 */
3078 RD_EXPORT
3079 int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
3080
3081
3082 /**
3083 * @brief Cancels the current callback dispatcher (rd_kafka_poll(),
3084 * rd_kafka_consume_callback(), etc).
3085 *
3086 * A callback may use this to force an immediate return to the calling
3087 * code (caller of e.g. rd_kafka_poll()) without processing any further
3088 * events.
3089 *
3090 * @remark This function MUST ONLY be called from within a librdkafka callback.
3091 */
3092 RD_EXPORT
3093 void rd_kafka_yield(rd_kafka_t *rk);
3094
3095
3096
3097 /**
3098 * @brief Pause producing or consumption for the provided list of partitions.
3099 *
3100 * Success or error is returned per-partition \p err in the \p partitions list.
3101 *
3102 * @returns RD_KAFKA_RESP_ERR_NO_ERROR
3103 */
3104 RD_EXPORT rd_kafka_resp_err_t
3105 rd_kafka_pause_partitions(rd_kafka_t *rk,
3106 rd_kafka_topic_partition_list_t *partitions);
3107
3108
3109
3110 /**
3111 * @brief Resume producing or consumption for the provided list of partitions.
3112 *
3113 * Success or error is returned per-partition \p err in the \p partitions list.
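 *
 * As an illustrative sketch (not from the original header), pausing and
 * resuming use a partition list; the topic name and partition are
 * hypothetical:
 * @code
 * rd_kafka_topic_partition_list_t *parts =
 *         rd_kafka_topic_partition_list_new(1);
 * rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
 * rd_kafka_pause_partitions(rk, parts);
 * ... // later
 * rd_kafka_resume_partitions(rk, parts);
 * rd_kafka_topic_partition_list_destroy(parts);
 * @endcode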
3114 *
3115 * @returns RD_KAFKA_RESP_ERR_NO_ERROR
3116 */
3117 RD_EXPORT rd_kafka_resp_err_t
3118 rd_kafka_resume_partitions(rd_kafka_t *rk,
3119 rd_kafka_topic_partition_list_t *partitions);
3120
3121
3122
3123 /**
3124 * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets
3125 * for partition.
3126 *
3127 * Offsets are returned in \p *low and \p *high respectively.
3128 *
3129 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
3130 */
3131 RD_EXPORT rd_kafka_resp_err_t
3132 rd_kafka_query_watermark_offsets(rd_kafka_t *rk,
3133 const char *topic,
3134 int32_t partition,
3135 int64_t *low,
3136 int64_t *high,
3137 int timeout_ms);
3138
3139
3140 /**
3141 * @brief Get last known low (oldest/beginning) and high (newest/end) offsets
3142 * for partition.
3143 *
3144 * The low offset is updated periodically (if statistics.interval.ms is set)
3145 * while the high offset is updated on each fetched message set from the broker.
3146 *
3147 * If there is no cached offset (either low or high, or both) then
3148 * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset.
3149 *
3150 * Offsets are returned in \p *low and \p *high respectively.
3151 *
3152 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
3153 *
3154 * @remark Shall only be used with an active consumer instance.
3155 */
3156 RD_EXPORT rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk,
3157 const char *topic,
3158 int32_t partition,
3159 int64_t *low,
3160 int64_t *high);
3161
3162
3163
3164 /**
3165 * @brief Look up the offsets for the given partitions by timestamp.
3166 *
3167 * The returned offset for each partition is the earliest offset whose
3168 * timestamp is greater than or equal to the given timestamp in the
3169 * corresponding partition.
3170 *
3171 * The timestamps to query are represented as \c offset in \p offsets
3172 * on input, and \c offset will contain the offset on output.
3173 *
3174 * The function will block for at most \p timeout_ms milliseconds.
3175 *
3176 * @remark Duplicate Topic+Partitions are not supported.
3177 * @remark Per-partition errors may be returned in \c
3178 * rd_kafka_topic_partition_t.err
3179 *
3180 * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were queried (do note
3181 * that per-partition errors might be set),
3182 * RD_KAFKA_RESP_ERR__TIMED_OUT if not all offsets could be fetched
3183 * within \p timeout_ms,
3184 * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p offsets list is empty,
3185 * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if all partitions are unknown,
3186 * RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE if unable to query leaders
3187 * for the given partitions.
3188 */
3189 RD_EXPORT rd_kafka_resp_err_t
3190 rd_kafka_offsets_for_times(rd_kafka_t *rk,
3191 rd_kafka_topic_partition_list_t *offsets,
3192 int timeout_ms);
3193
3194
3195
3196 /**
3197 * @brief Allocate and zero memory using the same allocator librdkafka uses.
3198 *
3199 * This is typically an abstraction for the calloc(3) call and makes sure
3200 * the application can use the same memory allocator as librdkafka for
3201 * allocating pointers that are used by librdkafka.
3202 *
3203 * \p rk can be set to return memory allocated by a specific \c rk instance
3204 * otherwise pass NULL for \p rk.
3205 *
3206 * @remark Memory allocated by rd_kafka_mem_calloc() must be freed using
3207 * rd_kafka_mem_free()
3208 */
3209 RD_EXPORT
3210 void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
3211
3212
3213
3214 /**
3215 * @brief Allocate memory using the same allocator librdkafka uses.
3216 *
3217 * This is typically an abstraction for the malloc(3) call and makes sure
3218 * the application can use the same memory allocator as librdkafka for
3219 * allocating pointers that are used by librdkafka.
3220 *
3221 * \p rk can be set to return memory allocated by a specific \c rk instance
3222 * otherwise pass NULL for \p rk.
3223 *
3224 * @remark Memory allocated by rd_kafka_mem_malloc() must be freed using
3225 * rd_kafka_mem_free()
3226 */
3227 RD_EXPORT
3228 void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
3229
3230
3231
3232 /**
3233 * @brief Free pointer returned by librdkafka
3234 *
3235 * This is typically an abstraction for the free(3) call and makes sure
3236 * the application can use the same memory allocator as librdkafka for
3237 * freeing pointers returned by librdkafka.
3238 *
3239 * In standard setups it is usually not necessary to use this interface
3240 * rather than the free(3) function.
3241 *
3242 * \p rk must be set for memory returned by APIs that take an \c rk argument,
3243 * for other APIs pass NULL for \p rk.
3244 *
3245 * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs
3246 * that explicitly mention using this function for freeing.
3247 */
3248 RD_EXPORT
3249 void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
3250
3251
3252 /**@}*/
3253
3254
3255
3256 /**
3257 * @name Queue API
3258 * @{
3259 *
3260 * Message queues allow the application to re-route consumed messages
3261 * from multiple topic+partitions into one single queue point.
3262 * This queue point containing messages from a number of topic+partitions
3263 * may then be served by a single rd_kafka_consume*_queue() call,
3264 * rather than one call per topic+partition combination.
3265 */
3266
3267
3268 /**
3269 * @brief Create a new message queue.
3270 *
3271 * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al.
3272 */
3273 RD_EXPORT
3274 rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
3275
3276 /**
3277 * Destroy a queue, purging all of its enqueued messages.
3278 */
3279 RD_EXPORT
3280 void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
3281
3282
3283 /**
3284 * @returns a reference to the main librdkafka event queue.
3285 * This is the queue served by rd_kafka_poll().
3286 *
3287 * Use rd_kafka_queue_destroy() to lose the reference.
3288 */
3289 RD_EXPORT
3290 rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
3291
3292
3293
3294 /**
3295 * @returns a reference to the SASL callback queue, if a SASL mechanism
3296 * with callbacks is configured (currently only OAUTHBEARER), else
3297 * returns NULL.
3298 *
3299 * Use rd_kafka_queue_destroy() to lose the reference.
3300 *
3301 * @sa rd_kafka_sasl_background_callbacks_enable()
3302 */
3303 RD_EXPORT
3304 rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
3305
3306
3307 /**
3308 * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka
3309 * background thread.
3310 *
3311 * This serves as an alternative for applications that do not call
3312 * rd_kafka_poll() (et.al.)
3307 /**
3308 * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka
3309 * background thread.
3310 *
3311 * This serves as an alternative for applications that do not call
3312 * rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means
3313 * of automatically triggering the refresh callbacks, which are needed to
3314 * initiate connections to the brokers in the case a custom OAUTHBEARER
3315 * refresh callback is configured.
3316 *
3317 * @returns NULL on success or an error object on error.
3318 *
3319 * @sa rd_kafka_queue_get_sasl()
3320 * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb()
3321 */
3322 RD_EXPORT
3323 rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
3324
3325
3326 /**
3327 * @returns a reference to the librdkafka consumer queue.
3328 * This is the queue served by rd_kafka_consumer_poll().
3329 *
3330 * Use rd_kafka_queue_destroy() to lose the reference.
3331 *
3332 * @remark rd_kafka_queue_destroy() MUST be called on this queue
3333 * prior to calling rd_kafka_consumer_close().
3334 */
3335 RD_EXPORT
3336 rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
3337
3338 /**
3339 * @returns a reference to the partition's queue, or NULL if
3340 * partition is invalid.
3341 *
3342 * Use rd_kafka_queue_destroy() to lose the reference.
3343 *
3344 * @remark rd_kafka_queue_destroy() MUST be called on this queue
3345 *
3346 * @remark This function only works on consumers.
3347 */
3348 RD_EXPORT
3349 rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk,
3350 const char *topic,
3351 int32_t partition);
3352
3353 /**
3354 * @returns a reference to the background thread queue, or NULL if the
3355 * background queue is not enabled.
3356 *
3357 * The background thread queue provides the application with an automatically
3358 * polled queue that triggers the event callback in a background thread;
3359 * this background thread is completely managed by librdkafka.
3360 *
3361 * The background thread queue is automatically created if a generic event
3362 * handler callback is configured with rd_kafka_conf_set_background_event_cb()
3363 * or if rd_kafka_queue_get_background() is called.
3364 *
3365 * The background queue is polled and served by librdkafka and MUST NOT be
3366 * polled, forwarded, or otherwise managed by the application, it may only
3367 * be used as the destination queue passed to queue-enabled APIs, such as
3368 * the Admin API.
3369 *
3370 * Use rd_kafka_queue_destroy() to lose the reference.
3371 *
3372 * @warning The background queue MUST NOT be read from (polled, consumed, etc),
3373 * or forwarded from.
3374 */
3375 RD_EXPORT
3376 rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
3377
3378
3379 /**
3380 * @brief Forward/re-route queue \p src to \p dst.
3381 * If \p dst is \c NULL the forwarding is removed.
3382 *
3383 * The internal refcounts for both queues are increased.
3384 *
3385 * @remark Regardless of whether \p dst is NULL or not, after calling this
3386 * function, \p src will not forward its fetch queue to the consumer
3387 * queue.
3388 */
3389 RD_EXPORT
3390 void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
3391
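/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of re-routing two partition queues into one application queue
 * so both partitions can be served by a single consume call. Assumes a
 * consumer handle `rk` already consuming the hypothetical topic "mytopic"
 * partitions 0 and 1; error handling is abbreviated.
 *
 *   rd_kafka_queue_t *common = rd_kafka_queue_new(rk);
 *   rd_kafka_queue_t *q0 = rd_kafka_queue_get_partition(rk, "mytopic", 0);
 *   rd_kafka_queue_t *q1 = rd_kafka_queue_get_partition(rk, "mytopic", 1);
 *
 *   rd_kafka_queue_forward(q0, common);
 *   rd_kafka_queue_forward(q1, common);
 *
 *   rd_kafka_message_t *rkm;
 *   while ((rkm = rd_kafka_consume_queue(common, 1000))) {
 *           // handle message, or the error in rkm->err
 *           rd_kafka_message_destroy(rkm);
 *   }
 *
 *   rd_kafka_queue_destroy(q0);
 *   rd_kafka_queue_destroy(q1);
 *   rd_kafka_queue_destroy(common);
 */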
3392 /**
3393 * @brief Forward librdkafka logs (and debug) to the specified queue
3394 * for serving with one of the ..poll() calls.
3395 *
3396 * This allows an application to serve log callbacks (\c log_cb)
3397 * in its thread of choice.
3398 *
3399 * @param rk Client instance.
3400 * @param rkqu Queue to forward logs to. If the value is NULL the logs
3401 * are forwarded to the main queue.
3402 *
3403 * @remark The configuration property \c log.queue MUST also be set to true.
3404 *
3405 * @remark librdkafka maintains its own reference to the provided queue.
3406 *
3407 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error,
3408 * e.g. RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true.
3409 */
3410 RD_EXPORT
3411 rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk,
3412 rd_kafka_queue_t *rkqu);
3413
3414
3415 /**
3416 * @returns the current number of elements in the queue.
3417 */
3418 RD_EXPORT
3419 size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
3420
3421
3422 /**
3423 * @brief Enable IO event triggering for queue.
3424 *
3425 * To ease integration with IO based polling loops this API
3426 * allows an application to create a separate file-descriptor
3427 * that librdkafka will write \p payload (of size \p size) to
3428 * whenever a new element is enqueued on a previously empty queue.
3429 *
3430 * To remove event triggering call with \p fd = -1.
3431 *
3432 * librdkafka will maintain a copy of the \p payload.
3433 *
3434 * @remark IO and callback event triggering are mutually exclusive.
3435 * @remark When using forwarded queues the IO event must only be enabled
3436 * on the final forwarded-to (destination) queue.
3437 * @remark The file-descriptor/socket must be set to non-blocking.
3438 */
3439 RD_EXPORT
3440 void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu,
3441 int fd,
3442 const void *payload,
3443 size_t size);
3444
3445 /**
3446 * @brief Enable callback event triggering for queue.
3447 *
3448 * The callback will be called from an internal librdkafka thread
3449 * when a new element is enqueued on a previously empty queue.
3450 *
3451 * To remove event triggering call with \p event_cb = NULL.
3452 *
3453 * The \p qev_opaque is passed to the callback's \p qev_opaque argument.
3454 *
3455 * @remark IO and callback event triggering are mutually exclusive.
3456 * @remark Since the callback may be triggered from internal librdkafka
3457 * threads, the application must not perform any prolonged work in
3458 * the callback, or call any librdkafka APIs (for the same rd_kafka_t
3459 * handle).
3460 */
3461 RD_EXPORT
3462 void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu,
3463 void (*event_cb)(rd_kafka_t *rk,
3464 void *qev_opaque),
3465 void *qev_opaque);
3466
3467
3468 /**
3469 * @brief Cancels the current rd_kafka_queue_poll() on \p rkqu.
3470 *
3471 * An application may use this from another thread to force
3472 * an immediate return to the calling code (caller of rd_kafka_queue_poll()).
3473 * Must not be used from signal handlers since that may cause deadlocks.
3474 */
3475 RD_EXPORT
3476 void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
3477
3478
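/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of wiring a queue into a poll(2)-based event loop with
 * rd_kafka_queue_io_event_enable(). Assumes a queue `rkqu` obtained
 * earlier; error handling is abbreviated.
 *
 *   #include <fcntl.h>
 *   #include <poll.h>
 *   #include <unistd.h>
 *
 *   int fds[2];
 *   pipe(fds);
 *   fcntl(fds[1], F_SETFL, O_NONBLOCK); // write side must be non-blocking
 *
 *   // librdkafka writes one byte ("1") to fds[1] whenever the queue goes
 *   // from empty to non-empty.
 *   rd_kafka_queue_io_event_enable(rkqu, fds[1], "1", 1);
 *
 *   struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *   if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
 *           char buf[8];
 *           read(fds[0], buf, sizeof(buf)); // drain the wakeup byte(s)
 *           // ...serve rkqu with the appropriate poll/consume call...
 *   }
 */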
3479 /**@}*/
3480
3481 /**
3482 *
3483 * @name Simple Consumer API (legacy)
3484 * @{
3485 *
3486 */
3487
3488
3489 #define RD_KAFKA_OFFSET_BEGINNING \
3490 -2 /**< Start consuming from beginning of \
3491 * kafka partition queue: oldest msg */
3492 #define RD_KAFKA_OFFSET_END \
3493 -1 /**< Start consuming from end of kafka \
3494 * partition queue: next msg */
3495 #define RD_KAFKA_OFFSET_STORED \
3496 -1000 /**< Start consuming from offset retrieved \
3497 * from offset store */
3498 #define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */
3499
3500
3501 /** @cond NO_DOC */
3502 #define RD_KAFKA_OFFSET_TAIL_BASE -2000 /* internal: do not use */
3503 /** @endcond */
3504
3505 /**
3506 * @brief Start consuming \p CNT messages from topic's current end offset.
3507 *
3508 * That is, if current end offset is 12345 and \p CNT is 200, it will start
3509 * consuming from offset \c 12345-200 = \c 12145. */
3510 #define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT))
3511
3512 /**
3513 * @brief Start consuming messages for topic \p rkt and \p partition
3514 * at offset \p offset which may either be an absolute \c (0..N)
3515 * or one of the logical offsets:
3516 * - RD_KAFKA_OFFSET_BEGINNING
3517 * - RD_KAFKA_OFFSET_END
3518 * - RD_KAFKA_OFFSET_STORED
3519 * - RD_KAFKA_OFFSET_TAIL
3520 *
3521 * rdkafka will attempt to keep \c queued.min.messages (config property)
3522 * messages in the local queue by repeatedly fetching batches of messages
3523 * from the broker until the threshold is reached.
3524 *
3525 * The application shall use one of the `rd_kafka_consume*()` functions
3526 * to consume messages from the local queue, each kafka message being
3527 * represented as a `rd_kafka_message_t *` object.
3528 *
3529 * `rd_kafka_consume_start()` must not be called multiple times for the same
3530 * topic and partition without stopping consumption first with
3531 * `rd_kafka_consume_stop()`.
3532 *
3533 * @returns 0 on success or -1 on error in which case errno is set accordingly:
3534 * - EBUSY - Conflicts with an existing or previous subscription
3535 * (RD_KAFKA_RESP_ERR__CONFLICT)
3536 * - EINVAL - Invalid offset, or incomplete configuration (lacking group.id)
3537 * (RD_KAFKA_RESP_ERR__INVALID_ARG)
3538 * - ESRCH - requested \p partition is invalid.
3539 * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
3540 * - ENOENT - topic is unknown in the Kafka cluster.
3541 * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
3542 *
3543 * Use `rd_kafka_errno2err()` to convert system \c errno to `rd_kafka_resp_err_t`
3544 */
3545 RD_EXPORT
3546 int rd_kafka_consume_start(rd_kafka_topic_t *rkt,
3547 int32_t partition,
3548 int64_t offset);
3549
3550 /**
3551 * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to
3552 * the provided queue \p rkqu (which must have been previously allocated
3553 * with `rd_kafka_queue_new()`).
3554 *
3555 * The application must use one of the `rd_kafka_consume_*_queue()` functions
3556 * to receive fetched messages.
3557 *
3558 * `rd_kafka_consume_start_queue()` must not be called multiple times for the
3559 * same topic and partition without stopping consumption first with
3560 * `rd_kafka_consume_stop()`.
3561 * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not
3562 * be combined for the same topic and partition.
3563 */
3564 RD_EXPORT
3565 int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt,
3566 int32_t partition,
3567 int64_t offset,
3568 rd_kafka_queue_t *rkqu);
3569
3570 /**
3571 * @brief Stop consuming messages for topic \p rkt and \p partition, purging
3572 * all messages currently in the local queue.
3573 *
3574 * NOTE: To enforce synchronisation this call will block until the internal
3575 * fetcher has terminated and offsets are committed to configured
3576 * storage method.
3577 *
3578 * The application must stop all consumers before calling
3579 * `rd_kafka_destroy()` on the main object handle.
3580 *
3581 * @returns 0 on success or -1 on error (see `errno`).
3582 */
3583 RD_EXPORT
3584 int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
3585
3586
3587
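/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of the legacy consume start/stop cycle, reading the last 100
 * messages of a partition via RD_KAFKA_OFFSET_TAIL. Assumes a consumer
 * handle `rk` and the hypothetical topic "mytopic"; error handling is
 * abbreviated.
 *
 *   rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "mytopic", NULL);
 *
 *   if (rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_TAIL(100)) == -1)
 *           fprintf(stderr, "start failed: %s\n",
 *                   rd_kafka_err2str(rd_kafka_last_error()));
 *
 *   for (int i = 0; i < 100; i++) {
 *           rd_kafka_message_t *rkm = rd_kafka_consume(rkt, 0, 1000);
 *           if (!rkm)
 *                   continue; // timed out, retry
 *           if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
 *                   ; // reached end of partition: typically not an error
 *           else if (!rkm->err)
 *                   ; // process rkm->payload / rkm->len
 *           rd_kafka_message_destroy(rkm);
 *   }
 *
 *   rd_kafka_consume_stop(rkt, 0);
 *   rd_kafka_topic_destroy(rkt);
 */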
3588 /**
3589 * @brief Seek consumer for topic+partition to \p offset which is either an
3590 * absolute or logical offset.
3591 *
3592 * If \p timeout_ms is specified (not 0) the seek call will wait this long
3593 * for the consumer to update its fetcher state for the given partition with
3594 * the new offset. This guarantees that no previously fetched messages for the
3595 * old offset (or fetch position) will be passed to the application.
3596 *
3597 * If the timeout is reached the internal state will be unknown to the caller
3598 * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
3599 *
3600 * If \p timeout_ms is 0 it will initiate the seek but return
3601 * immediately without any error reporting (e.g., async).
3602 *
3603 * This call will purge all pre-fetched messages for the given partition, which
3604 * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
3605 * may thus lead to increased network usage as messages are re-fetched from
3606 * the broker.
3607 *
3608 * @remark Seek must only be performed for already assigned/consumed partitions,
3609 * use rd_kafka_assign() (et.al) to set the initial starting offset
3610 * for a new assignment.
3611 *
3612 * @returns `RD_KAFKA_RESP_ERR_NO_ERROR` on success else an error code.
3613 *
3614 * @deprecated Use rd_kafka_seek_partitions().
3615 */
3616 RD_EXPORT
3617 rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt,
3618 int32_t partition,
3619 int64_t offset,
3620 int timeout_ms);
3621
3622
3623
3624 /**
3625 * @brief Seek consumer for partitions in \p partitions to the per-partition
3626 * offset in the \c .offset field of \p partitions.
3627 *
3628 * The offset may be either absolute (>= 0) or a logical offset.
3629 *
3630 * If \p timeout_ms is specified (not 0) the seek call will wait this long
3631 * for the consumer to update its fetcher state for the given partition with
3632 * the new offset. This guarantees that no previously fetched messages for the
3633 * old offset (or fetch position) will be passed to the application.
3634 *
3635 * If the timeout is reached the internal state will be unknown to the caller
3636 * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
3637 *
3638 * If \p timeout_ms is 0 it will initiate the seek but return
3639 * immediately without any error reporting (e.g., async).
3640 *
3641 * This call will purge all pre-fetched messages for the given partition, which
3642 * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
3643 * may thus lead to increased network usage as messages are re-fetched from
3644 * the broker.
3645 *
3646 * Individual partition errors are reported in the per-partition \c .err field
3647 * of \p partitions.
3648 *
3649 * @remark Seek must only be performed for already assigned/consumed partitions,
3650 * use rd_kafka_assign() (et.al) to set the initial starting offset
3651 * for a new assignment.
3652 *
3653 * @returns NULL on success or an error object on failure.
3654 */
3655 RD_EXPORT rd_kafka_error_t *
3656 rd_kafka_seek_partitions(rd_kafka_t *rk,
3657 rd_kafka_topic_partition_list_t *partitions,
3658 int timeout_ms);
3659
3660
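/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of rewinding two assigned partitions with
 * rd_kafka_seek_partitions(). Assumes a consumer handle `rk` already
 * assigned the hypothetical topic "mytopic"; error handling abbreviated.
 *
 *   rd_kafka_topic_partition_list_t *parts =
 *           rd_kafka_topic_partition_list_new(2);
 *   rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset = 1000;
 *   rd_kafka_topic_partition_list_add(parts, "mytopic", 1)->offset =
 *           RD_KAFKA_OFFSET_BEGINNING;
 *
 *   rd_kafka_error_t *error = rd_kafka_seek_partitions(rk, parts, 5000);
 *   if (error) {
 *           fprintf(stderr, "seek failed: %s\n",
 *                   rd_kafka_error_string(error));
 *           rd_kafka_error_destroy(error);
 *   }
 *   // On success, also check each parts->elems[i].err for
 *   // per-partition failures.
 *   rd_kafka_topic_partition_list_destroy(parts);
 */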
3661 /**
3662 * @brief Consume a single message from topic \p rkt and \p partition
3663 *
3664 * \p timeout_ms is the maximum amount of time to wait for a message to be received.
3665 * Consumer must have been previously started with `rd_kafka_consume_start()`.
3666 *
3667 * @returns a message object on success or \c NULL on error.
3668 * The message object must be destroyed with `rd_kafka_message_destroy()`
3669 * when the application is done with it.
3670 *
3671 * Errors (when returning NULL):
3672 * - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched.
3673 * - ENOENT - \p rkt + \p partition is unknown.
3674 * (no prior `rd_kafka_consume_start()` call)
3675 *
3676 * NOTE: The returned message's \c ..->err must be checked for errors.
3677 * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the
3678 * end of the partition has been reached, which should typically not be
3679 * considered an error. The application should handle this case
3680 * (e.g., ignore).
3681 *
3682 * @remark on_consume() interceptors may be called from this function prior to
3683 * passing message to application.
3684 */
3685 RD_EXPORT
3686 rd_kafka_message_t *
3687 rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
3688
3689
3690
3691 /**
3692 * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition
3693 * putting a pointer to each message in the application provided
3694 * array \p rkmessages (of size \p rkmessages_size entries).
3695 *
3696 * `rd_kafka_consume_batch()` provides higher throughput performance
3697 * than `rd_kafka_consume()`.
3698 *
3699 * \p timeout_ms is the maximum amount of time to wait for all of
3700 * \p rkmessages_size messages to be put into \p rkmessages.
3701 * If no messages were available within the timeout period this function
3702 * returns 0 and \p rkmessages remains untouched.
3703 * This differs somewhat from `rd_kafka_consume()`.
3704 *
3705 * The message objects must be destroyed with `rd_kafka_message_destroy()`
3706 * when the application is done with them.
3707 *
3708 * @returns the number of rkmessages added in \p rkmessages,
3709 * or -1 on error (same error codes as for `rd_kafka_consume()`).
3710 *
3711 * @sa rd_kafka_consume()
3712 *
3713 * @remark on_consume() interceptors may be called from this function prior to
3714 * passing message to application.
3715 */
3716 RD_EXPORT
3717 ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt,
3718 int32_t partition,
3719 int timeout_ms,
3720 rd_kafka_message_t **rkmessages,
3721 size_t rkmessages_size);
3722
3723
3724
3725 /**
3726 * @brief Consumes messages from topic \p rkt and \p partition, calling
3727 * the provided callback for each consumed message.
3728 *
3729 * `rd_kafka_consume_callback()` provides higher throughput performance
3730 * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`.
3731 *
3732 * \p timeout_ms is the maximum amount of time to wait for one or more messages
3733 * to arrive.
3734 *
3735 * The provided \p consume_cb function is called for each message,
3736 * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the
3737 * provided \p rkmessage.
3738 *
3739 * The \p commit_opaque argument is passed to the \p consume_cb
3740 * as \p commit_opaque.
3741 *
3742 * @returns the number of messages processed or -1 on error.
3743 *
3744 * @sa rd_kafka_consume()
3745 *
3746 * @remark on_consume() interceptors may be called from this function prior to
3747 * passing message to application.
3748 *
3749 * @remark This function will return early if a transaction control message is
3750 * received, these messages are not exposed to the application but
3751 * still enqueued on the consumer queue to make sure their
3752 * offsets are stored.
3753 *
3754 * @deprecated This API is deprecated and subject for future removal.
3755 * There is no new callback-based consume interface, use the
3756 * poll/queue based alternatives.
3757 */
3758 RD_EXPORT
3759 int rd_kafka_consume_callback(rd_kafka_topic_t *rkt,
3760 int32_t partition,
3761 int timeout_ms,
3762 void (*consume_cb)(rd_kafka_message_t *rkmessage,
3763 void *commit_opaque),
3764 void *commit_opaque);
3765
3766
3767 /**
3768 * @name Simple Consumer API (legacy): Queue consumers
3769 * @{
3770 *
3771 * The following `..._queue()` functions are analogous to the functions above
3772 * but read messages from the provided queue \p rkqu instead.
3773 * \p rkqu must have been previously created with `rd_kafka_queue_new()`
3774 * and the topic consumer must have been started with
3775 * `rd_kafka_consume_start_queue()` utilising the same queue.
3776 */
3777
3778 /**
3779 * @brief Consume from queue
3780 *
3781 * @sa rd_kafka_consume()
3782 */
3783 RD_EXPORT
3784 rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu,
3785 int timeout_ms);
3786
3787 /**
3788 * @brief Consume batch of messages from queue
3789 *
3790 * @sa rd_kafka_consume_batch()
3791 */
3792 RD_EXPORT
3793 ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu,
3794 int timeout_ms,
3795 rd_kafka_message_t **rkmessages,
3796 size_t rkmessages_size);
3797
3798 /**
3799 * @brief Consume multiple messages from queue with callback
3800 *
3801 * @sa rd_kafka_consume_callback()
3802 *
3803 * @deprecated This API is deprecated and subject for future removal.
3804 * There is no new callback-based consume interface, use the
3805 * poll/queue based alternatives.
3806 */
3807 RD_EXPORT
3808 int rd_kafka_consume_callback_queue(
3809 rd_kafka_queue_t *rkqu,
3810 int timeout_ms,
3811 void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque),
3812 void *commit_opaque);
3813
3814
3815 /**@}*/
3816
3817
3818
3819 /**
3820 * @name Simple Consumer API (legacy): Topic+partition offset store.
3821 * @{
3822 *
3823 * If \c auto.commit.enable is true the offset is stored automatically prior to
3824 * returning of the message(s) in each of the rd_kafka_consume*() functions
3825 * above.
3826 */
3827
3828
3829 /**
3830 * @brief Store offset \p offset + 1 for topic \p rkt partition \p partition.
3831 *
3832 * The \c offset + 1 will be committed (written) to broker (or file) according
3833 * to \c `auto.commit.interval.ms` or manual offset-less commit()
3834 *
3835 * @warning This method may only be called for partitions that are currently
3836 * assigned.
3837 * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
3838 * Since v1.9.0.
3839 *
3840 * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
3841 * this may later interfere with resuming a paused partition, instead
3842 * store offsets prior to calling seek.
3843 *
3844 * @remark \c `enable.auto.offset.store` must be set to "false" when using
3845 * this API.
3846 *
3847 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
3848 */
3849 RD_EXPORT
3850 rd_kafka_resp_err_t
3851 rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
3852
3853
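/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of manual offset storing. Assumes the consumer was configured
 * with enable.auto.offset.store=false (auto commit left enabled), and a
 * freshly consumed message `rkm` that has just been fully processed.
 *
 *   rd_kafka_topic_partition_list_t *stored =
 *           rd_kafka_topic_partition_list_new(1);
 *   // Store the position to resume from: last processed offset + 1
 *   // (rd_kafka_offsets_store() stores .offset as-is, see below).
 *   rd_kafka_topic_partition_list_add(stored,
 *                                     rd_kafka_topic_name(rkm->rkt),
 *                                     rkm->partition)->offset =
 *           rkm->offset + 1;
 *   rd_kafka_resp_err_t err = rd_kafka_offsets_store(rk, stored);
 *   if (err)
 *           fprintf(stderr, "offsets_store: %s\n", rd_kafka_err2str(err));
 *   rd_kafka_topic_partition_list_destroy(stored);
 */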
3854 /**
3855 * @brief Store offsets for next auto-commit for one or more partitions.
3856 *
3857 * The offset will be committed (written) to the offset store according
3858 * to \c `auto.commit.interval.ms` or manual offset-less commit().
3859 *
3860 * Per-partition success/error status is propagated through each partition's
3861 * \c .err for all return values (even NO_ERROR) except INVALID_ARG.
3862 *
3863 * @warning This method may only be called for partitions that are currently
3864 * assigned.
3865 * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
3866 * Since v1.9.0.
3867 *
3868 * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
3869 * this may later interfere with resuming a paused partition, instead
3870 * store offsets prior to calling seek.
3871 *
3872 * @remark The \c .offset field is stored as is, it will NOT be + 1.
3873 *
3874 * @remark \c `enable.auto.offset.store` must be set to "false" when using
3875 * this API.
3876 *
3877 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or
3878 * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store
3879 * is true, or
3880 * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE
3881 * if none of the offsets could be stored.
3882 */
3883 RD_EXPORT rd_kafka_resp_err_t
3884 rd_kafka_offsets_store(rd_kafka_t *rk,
3885 rd_kafka_topic_partition_list_t *offsets);
3886 /**@}*/
3887
3888
3889
3890 /**
3891 * @name KafkaConsumer (C)
3892 * @{
3893 * @brief High-level KafkaConsumer C API
3894 *
3895 *
3896 *
3897 */
3898
3899 /**
3900 * @brief Subscribe to topic set using balanced consumer groups.
3901 *
3902 * Wildcard (regex) topics are supported:
3903 * any topic name in the \p topics list that is prefixed with \c \"^\" will
3904 * be regex-matched to the full list of topics in the cluster and matching
3905 * topics will be added to the subscription list.
3906 *
3907 * The full topic list is retrieved every \c topic.metadata.refresh.interval.ms
3908 * to pick up new or deleted topics that match the subscription.
3909 * If there is any change to the matched topics the consumer will
3910 * immediately rejoin the group with the updated set of subscribed topics.
3911 *
3912 * Regex and full topic names can be mixed in \p topics.
3913 *
3914 * @remark Only the \c .topic field is used in the supplied \p topics list,
3915 * all other fields are ignored.
3916 *
3917 * @remark subscribe() is an asynchronous method which returns immediately:
3918 * background threads will (re)join the group, wait for group rebalance,
3919 * issue any registered rebalance_cb, assign() the assigned partitions,
3920 * and then start fetching messages. This cycle may take up to
3921 * \c session.timeout.ms * 2 or more to complete.
3922 *
3923 * @remark After this call returns a consumer error will be returned by
3924 * rd_kafka_consumer_poll (et.al) for each unavailable topic in the
3925 * \p topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART
3926 * for non-existent topics, and
3927 * RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics.
3928 * The consumer error will be raised through rd_kafka_consumer_poll()
3929 * (et.al.) with the \c rd_kafka_message_t.err field set to one of the
3930 * error codes mentioned above.
3931 * The subscribe function itself is asynchronous and will not return
3932 * an error on unavailable topics.
3933 *
3934 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
3935 * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid
3936 * topics or regexes or duplicate entries,
3937 * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error.
3938 */
3939 RD_EXPORT rd_kafka_resp_err_t
3940 rd_kafka_subscribe(rd_kafka_t *rk,
3941 const rd_kafka_topic_partition_list_t *topics);
3942
3943
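/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of subscribing with a mix of a regex and a literal topic name
 * and then polling. Assumes a consumer handle `rk` configured with a
 * group.id; topic names are hypothetical; error handling abbreviated.
 *
 *   rd_kafka_topic_partition_list_t *topics =
 *           rd_kafka_topic_partition_list_new(2);
 *   rd_kafka_topic_partition_list_add(topics, "^metrics\\..*",
 *                                     RD_KAFKA_PARTITION_UA);
 *   rd_kafka_topic_partition_list_add(topics, "orders",
 *                                     RD_KAFKA_PARTITION_UA);
 *
 *   rd_kafka_resp_err_t err = rd_kafka_subscribe(rk, topics);
 *   rd_kafka_topic_partition_list_destroy(topics); // list is copied
 *
 *   while (!err) {
 *           rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
 *           if (!rkm)
 *                   continue;
 *           if (!rkm->err)
 *                   ; // process message
 *           rd_kafka_message_destroy(rkm);
 *   }
 */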
3944 /**
3945 * @brief Unsubscribe from the current subscription set.
3946 */
3947 RD_EXPORT
3948 rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
3949
3950
3951 /**
3952 * @brief Returns the current topic subscription
3953 *
3954 * @returns An error code on failure, otherwise \p topics is updated
3955 * to point to a newly allocated topic list (possibly empty).
3956 *
3957 * @remark The application is responsible for calling
3958 * rd_kafka_topic_partition_list_destroy on the returned list.
3959 */
3960 RD_EXPORT rd_kafka_resp_err_t
3961 rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
3962
3963
3964
3965 /**
3966 * @brief Poll the consumer for messages or events.
3967 *
3968 * Will block for at most \p timeout_ms milliseconds.
3969 *
3970 * @remark An application should make sure to call consumer_poll() at regular
3971 * intervals, even if no messages are expected, to serve any
3972 * queued callbacks waiting to be called. This is especially
3973 * important when a rebalance_cb has been registered as it needs
3974 * to be called and handled properly to synchronize internal
3975 * consumer state.
3976 *
3977 * @returns A message object which is a proper message if \p ->err is
3978 * RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other
3979 * value.
3980 *
3981 * @remark on_consume() interceptors may be called from this function prior to
3982 * passing message to application.
3983 *
3984 * @remark When subscribing to topics the application must call poll at
3985 * least every \c max.poll.interval.ms to remain a member of the
3986 * consumer group.
3987 *
3988 * Noteworthy errors returned in \c ->err:
3989 * - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED - application failed to call
3990 * poll within `max.poll.interval.ms`.
3991 *
3992 * @sa rd_kafka_message_t
3993 */
3994 RD_EXPORT
3995 rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
3996
3997 /**
3998 * @brief Close the consumer.
3999 *
4000 * This call will block until the consumer has revoked its assignment,
4001 * calling the \c rebalance_cb if it is configured, committed offsets
4002 * to broker, and left the consumer group (if applicable).
4003 * The maximum blocking time is roughly limited to session.timeout.ms.
4004 *
4005 * @returns An error code indicating if the consumer close was successful
4006 * or not.
4007 * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
4008 * a fatal error.
4009 *
4010 * @remark The application still needs to call rd_kafka_destroy() after
4011 * this call finishes to clean up the underlying handle resources.
4012 *
4013 */
4014 RD_EXPORT
4015 rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
4016
4017
4018 /**
4019 * @brief Asynchronously close the consumer.
4020 *
4021 * Performs the same actions as rd_kafka_consumer_close() but in a
4022 * background thread.
4023 *
4024 * Rebalance events/callbacks (etc) will be forwarded to the
4025 * application-provided \p rkqu. The application must poll/serve this queue
4026 * until rd_kafka_consumer_closed() returns true.
4027 *
4028 * @remark Depending on consumer group join state there may or may not be
4029 * rebalance events emitted on \p rkqu.
4030 *
4031 * @returns an error object if the consumer close failed, else NULL.
4032 *
4033 * @sa rd_kafka_consumer_closed()
4034 */
4035 RD_EXPORT
4036 rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk,
4037 rd_kafka_queue_t *rkqu);
4038
4039
4040 /**
4041 * @returns 1 if the consumer is closed, else 0.
4042 *
4043 * Should be used in conjunction with rd_kafka_consumer_close_queue() to know
4044 * when the consumer has been closed.
4045 *
4046 * @sa rd_kafka_consumer_close_queue()
4047 */
4048 RD_EXPORT
4049 int rd_kafka_consumer_closed(rd_kafka_t *rk);
4050
4051
4052 /**
4053 * @brief Incrementally add \p partitions to the current assignment.
4054 *
4055 * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
4056 * this method should be used in a rebalance callback to adjust the current
4057 * assignment appropriately in the case where the rebalance type is
4058 * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the
4059 * partition list passed to the callback (or a copy of it), even if the
4060 * list is empty. \p partitions must not be NULL. This method may also be
4061 * used outside the context of a rebalance callback.
4062 *
4063 * @returns NULL on success, or an error object if the operation was
4064 * unsuccessful.
4065 *
4066 * @remark The returned error object (if not NULL) must be destroyed with
4067 * rd_kafka_error_destroy().
4068 */
4069 RD_EXPORT rd_kafka_error_t *
4070 rd_kafka_incremental_assign(rd_kafka_t *rk,
4071 const rd_kafka_topic_partition_list_t *partitions);
4072
4073
4074 /**
4075 * @brief Incrementally remove \p partitions from the current assignment.
4076 *
4077 * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
4078 * this method should be used in a rebalance callback to adjust the current
4079 * assignment appropriately in the case where the rebalance type is
4080 * RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the
4081 * partition list passed to the callback (or a copy of it), even if the
4082 * list is empty. \p partitions must not be NULL. This method may also be
4083 * used outside the context of a rebalance callback.
4084 *
4085 * @returns NULL on success, or an error object if the operation was
4086 * unsuccessful.
4087 *
4088 * @remark The returned error object (if not NULL) must be destroyed with
4089 * rd_kafka_error_destroy().
4090 */
4091 RD_EXPORT rd_kafka_error_t *rd_kafka_incremental_unassign(
4092 rd_kafka_t *rk,
4093 const rd_kafka_topic_partition_list_t *partitions);
4094
4095
4096 /**
4097 * @brief The rebalance protocol currently in use. This will be
4098 * "NONE" if the consumer has not (yet) joined a group, else it will
4099 * match the rebalance protocol ("EAGER", "COOPERATIVE") of the
4100 * configured and selected assignor(s). All configured
4101 * assignors must have the same protocol type, meaning
4102 * online migration of a consumer group from using one
4103 * protocol to another (in particular upgrading from EAGER
4104 * to COOPERATIVE) without a restart is not currently
4105 * supported.
4106 *
4107 * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success.
4108 */
4109 RD_EXPORT
4110 const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
4111
4112
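/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of a rebalance callback that handles both the EAGER and
 * COOPERATIVE protocols by checking rd_kafka_rebalance_protocol().
 * Assumed to be registered via rd_kafka_conf_set_rebalance_cb(); error
 * handling abbreviated.
 *
 *   static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
 *                            rd_kafka_topic_partition_list_t *parts,
 *                            void *opaque) {
 *           const char *proto = rd_kafka_rebalance_protocol(rk);
 *           int coop = proto && !strcmp(proto, "COOPERATIVE");
 *           rd_kafka_error_t *error = NULL;
 *
 *           if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
 *                   if (coop)
 *                           error = rd_kafka_incremental_assign(rk, parts);
 *                   else
 *                           rd_kafka_assign(rk, parts);
 *           } else { // RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
 *                   if (coop)
 *                           error = rd_kafka_incremental_unassign(rk, parts);
 *                   else
 *                           rd_kafka_assign(rk, NULL);
 *           }
 *
 *           if (error) {
 *                   fprintf(stderr, "rebalance: %s\n",
 *                           rd_kafka_error_string(error));
 *                   rd_kafka_error_destroy(error);
 *           }
 *   }
 */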
4113 /**
4114 * @brief Atomic assignment of partitions to consume.
4115 *
4116 * The new \p partitions will replace the existing assignment.
4117 *
4118 * A zero-length \p partitions will treat the partitions as a valid,
4119 * albeit empty assignment, and maintain internal state, while a \c NULL
4120 * value for \p partitions will reset and clear the internal state.
4121 *
4122 * When used from a rebalance callback, the application should pass the
4123 * partition list passed to the callback (or a copy of it) even if the list
4124 * is empty (i.e. should not pass NULL in this case) so as to maintain
4125 * internal join state. This is not strictly required - the application
4126 * may adjust the assignment provided by the group. However, this is rarely
4127 * useful in practice.
4128 *
4129 * @returns An error code indicating if the new assignment was applied or not.
4130 * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
4131 * a fatal error.
4132 */
4133 RD_EXPORT rd_kafka_resp_err_t
4134 rd_kafka_assign(rd_kafka_t *rk,
4135 const rd_kafka_topic_partition_list_t *partitions);
4136
4137 /**
4138 * @brief Returns the current partition assignment as set by rd_kafka_assign()
4139 * or rd_kafka_incremental_assign().
4140 *
4141 * @returns An error code on failure, otherwise \p partitions is updated
4142 * to point to a newly allocated partition list (possibly empty).
4143 *
4144 * @remark The application is responsible for calling
4145 * rd_kafka_topic_partition_list_destroy on the returned list.
4146 *
4147 * @remark This assignment represents the partitions assigned through the
4148 * assign functions and not the partitions assigned to this consumer
4149 * instance by the consumer group leader.
4150 * They are usually the same following a rebalance but not necessarily
4151 * since an application is free to assign any partitions.
4152 */
4153 RD_EXPORT rd_kafka_resp_err_t
4154 rd_kafka_assignment(rd_kafka_t *rk,
4155 rd_kafka_topic_partition_list_t **partitions);
4156
4157
4158 /**
4159 * @brief Check whether the consumer considers the current assignment to
4160 * have been lost involuntarily. This method is only applicable for
4161 * use with a high level subscribing consumer. Assignments are revoked
4162 * immediately when determined to have been lost, so this method
4163 * is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event
4164 * or from within a rebalance_cb. Partitions that have been lost may
4165 * already be owned by other members in the group and therefore
4166 * committing offsets, for example, may fail.
4167 *
4168 * @remark Calling rd_kafka_assign(), rd_kafka_incremental_assign() or
4169 * rd_kafka_incremental_unassign() resets this flag.
4170 *
4171 * @returns Returns 1 if the current partition assignment is considered
4172 * lost, 0 otherwise.
4173 */
4174 RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk);
4175
4176
4177 /**
4178 * @brief Commit offsets on broker for the provided list of partitions.
4179 *
4180 * \p offsets should contain \c topic, \c partition, \c offset and possibly
4181 * \c metadata. The \c offset should be the offset where consumption will
4182 * resume, i.e., the last processed offset + 1.
4183 * If \p offsets is NULL the current partition assignment will be used instead.
4184 *
4185 * If \p async is false this operation will block until the broker offset commit
4186 * is done, returning the resulting success or error code.
4187 *
4188 * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been
4189 * configured the callback will be enqueued for a future call to
4190 * rd_kafka_poll(), rd_kafka_consumer_poll() or similar.
4191 *
4192 * @returns An error code indicating if the commit was successful,
4193 * or successfully scheduled if asynchronous, or failed.
4194 * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
4195 * a fatal error.
4196 */
4197 RD_EXPORT rd_kafka_resp_err_t
4198 rd_kafka_commit(rd_kafka_t *rk,
4199 const rd_kafka_topic_partition_list_t *offsets,
4200 int async);
4201
4202
4203 /**
4204 * @brief Commit message's offset on broker for the message's partition.
4205 * The committed offset is the message's offset + 1.
4206 *
4207 * @sa rd_kafka_commit
4208 */
4209 RD_EXPORT rd_kafka_resp_err_t
4210 rd_kafka_commit_message(rd_kafka_t *rk,
4211 const rd_kafka_message_t *rkmessage,
4212 int async);
4213
4214
4215 /**
4216 * @brief Commit offsets on broker for the provided list of partitions.
4217 *
4218 * See rd_kafka_commit for \p offsets semantics.
4219 *
4220 * The result of the offset commit will be posted on the provided \p rkqu queue.
4221 *
4222 * If the application uses one of the poll APIs (rd_kafka_poll(),
4223 * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue
4224 * the \p cb callback is required.
4225 *
4226 * The \p commit_opaque argument is passed to the callback as \p commit_opaque,
4227 * or if using the event API the callback is ignored and the offset commit
4228 * result will be returned as an RD_KAFKA_EVENT_COMMIT event and the
4229 * \p commit_opaque value will be available with rd_kafka_event_opaque().
4230 *
4231 * If \p rkqu is NULL a temporary queue will be created and the callback will
4232 * be served by this call.
4233 *
4234 * @sa rd_kafka_commit()
4235 * @sa rd_kafka_conf_set_offset_commit_cb()
4236 */
4237 RD_EXPORT rd_kafka_resp_err_t
4238 rd_kafka_commit_queue(rd_kafka_t *rk,
4239 const rd_kafka_topic_partition_list_t *offsets,
4240 rd_kafka_queue_t *rkqu,
4241 void (*cb)(rd_kafka_t *rk,
4242 rd_kafka_resp_err_t err,
4243 rd_kafka_topic_partition_list_t *offsets,
4244 void *commit_opaque),
4245 void *commit_opaque);
4246
4247
4248 /**
4249 * @brief Retrieve committed offsets for topics+partitions.
4250 *
4251 * The \p offset field of each requested partition will either be set to
4252 * the stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored
4253 * offset for that partition.
4254 *
4255 * Committed offsets will be returned according to the `isolation.level`
4256 * configuration property, if set to `read_committed` (default) then only
4257 * stable offsets for fully committed transactions will be returned, while
4258 * `read_uncommitted` may return offsets for not yet committed transactions.
4259 *
4260 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
4261 * \p offset or \p err field of each \p partitions' element is filled
4262 * in with the stored offset, or a partition specific error.
4263 * Else returns an error code.
4264 */
4265 RD_EXPORT rd_kafka_resp_err_t
4266 rd_kafka_committed(rd_kafka_t *rk,
4267 rd_kafka_topic_partition_list_t *partitions,
4268 int timeout_ms);
4269
4270
4271
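/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of committing the current assignment's offsets through
 * rd_kafka_commit_queue() with a result callback served by a temporary
 * queue (rkqu = NULL). Error handling abbreviated.
 *
 *   static void commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
 *                         rd_kafka_topic_partition_list_t *offsets,
 *                         void *commit_opaque) {
 *           if (err)
 *                   fprintf(stderr, "commit failed: %s\n",
 *                           rd_kafka_err2str(err));
 *   }
 *
 *   // NULL offsets = commit the current assignment's positions;
 *   // NULL rkqu = create a temporary queue and serve commit_cb here.
 *   rd_kafka_commit_queue(rk, NULL, NULL, commit_cb, NULL);
 */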
4272 /**
4273 * @brief Retrieve current positions (offsets) for topics+partitions.
4274 *
4275 * The \p offset field of each requested partition will be set to the offset
4276 * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there
4277 * was no previous message.
4278 *
4279 * @remark In this context the last consumed message is the offset consumed
4280 * by the current librdkafka instance and, in case of rebalancing, not
4281 * necessarily the last message fetched from the partition.
4282 *
4283 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
4284 * \p offset or \p err field of each \p partitions' element is filled
4285 * in with the stored offset, or a partition specific error.
4286 * Else returns an error code.
4287 */
4288 RD_EXPORT rd_kafka_resp_err_t
4289 rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
4290
4291
4292
4293 /**
4294 * @returns the current consumer group metadata associated with this consumer,
4295 * or NULL if \p rk is not a consumer configured with a \c group.id.
4296 * This metadata object should be passed to the transactional
4297 * producer's rd_kafka_send_offsets_to_transaction() API.
4298 *
4299 * @remark The returned pointer must be freed by the application using
4300 * rd_kafka_consumer_group_metadata_destroy().
4301 *
4302 * @sa rd_kafka_send_offsets_to_transaction()
4303 */
4304 RD_EXPORT rd_kafka_consumer_group_metadata_t *
4305 rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
4306
4307
4308 /**
4309 * @brief Create a new consumer group metadata object.
4310 * This is typically only used for writing tests.
4311 *
4312 * @param group_id The group id.
4313 *
4314 * @remark The returned pointer must be freed by the application using
4315 * rd_kafka_consumer_group_metadata_destroy().
4316 */
4317 RD_EXPORT rd_kafka_consumer_group_metadata_t *
4318 rd_kafka_consumer_group_metadata_new(const char *group_id);
4319
4320
4321 /**
4322 * @brief Create a new consumer group metadata object.
4323 * This is typically only used for writing tests.
4324 *
4325 * @param group_id The group id.
4326 * @param generation_id The group generation id.
4327 * @param member_id The group member id.
4328 * @param group_instance_id The group instance id (may be NULL).
4329 *
4330 * @remark The returned pointer must be freed by the application using
4331 * rd_kafka_consumer_group_metadata_destroy().
4332 */
4333 RD_EXPORT rd_kafka_consumer_group_metadata_t *
4334 rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id,
4335 int32_t generation_id,
4336 const char *member_id,
4337 const char *group_instance_id);
4338
4339
4340 /**
4341 * @brief Frees the consumer group metadata object as returned by
4342 * rd_kafka_consumer_group_metadata().
4343 */
4344 RD_EXPORT void
4345 rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
4346
4347
4348 /**
4349 * @brief Serialize the consumer group metadata to a binary format.
4350 * This is mainly for client binding use and not for application use.
4351 *
4352 * @remark The serialized metadata format is private and is not compatible
4353 * across different versions or even builds of librdkafka.
4354 * It should only be used in the same process runtime and must only
4355 * be passed to rd_kafka_consumer_group_metadata_read().
4356 *
4357 * @param cgmd Metadata to be serialized.
4358 * @param bufferp On success this pointer will be updated to point to an
4359 * allocated buffer containing the serialized metadata.
4360 * The buffer must be freed with rd_kafka_mem_free().
4361 * @param sizep The pointed to size will be updated with the size of
4362 * the serialized buffer.
4363 *
4364 * @returns NULL on success or an error object on failure.
4365 *
4366 * @sa rd_kafka_consumer_group_metadata_read()
4367 */
4368 RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(
4369 const rd_kafka_consumer_group_metadata_t *cgmd,
4370 void **bufferp,
4371 size_t *sizep);
4372
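/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of a serialize/deserialize round-trip of the consumer group
 * metadata, as a client binding might do. Assumes a consumer handle `rk`
 * configured with a group.id; error handling abbreviated.
 *
 *   rd_kafka_consumer_group_metadata_t *cgmd =
 *           rd_kafka_consumer_group_metadata(rk);
 *   void *buf;
 *   size_t size;
 *
 *   if (!rd_kafka_consumer_group_metadata_write(cgmd, &buf, &size)) {
 *           rd_kafka_consumer_group_metadata_t *copy;
 *           if (!rd_kafka_consumer_group_metadata_read(&copy, buf, size))
 *                   rd_kafka_consumer_group_metadata_destroy(copy);
 *           rd_kafka_mem_free(NULL, buf); // buffer freed with mem_free()
 *   }
 *   rd_kafka_consumer_group_metadata_destroy(cgmd);
 */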
4373 /**
4374 * @brief Reads serialized consumer group metadata and returns a
4375 * consumer group metadata object.
4376 * This is mainly for client binding use and not for application use.
4377 *
4378 * @remark The serialized metadata format is private and is not compatible
4379 * across different versions or even builds of librdkafka.
4380 * It should only be used in the same process runtime and must only
4381 * be passed to rd_kafka_consumer_group_metadata_read().
4382 *
4383 * @param cgmdp On success this pointer will be updated to point to a new
4384 * consumer group metadata object which must be freed with
4385 * rd_kafka_consumer_group_metadata_destroy().
4386 * @param buffer Pointer to the serialized data.
4387 * @param size Size of the serialized data.
4388 *
4389 * @returns NULL on success or an error object on failure.
4390 *
4391 * @sa rd_kafka_consumer_group_metadata_write()
4392 */
4393 RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(
4394 rd_kafka_consumer_group_metadata_t **cgmdp,
4395 const void *buffer,
4396 size_t size);
4397
4398 /**@}*/
4399
4400
4401
4402 /**
4403 * @name Producer API
4404 * @{
4405 *
4406 *
4407 */
4408
4409
4410 /**
4411 * @brief Producer message flags
4412 */
4413 #define RD_KAFKA_MSG_F_FREE \
4414 0x1 /**< Delegate freeing of payload to rdkafka. \
4415 */
4416 #define RD_KAFKA_MSG_F_COPY \
4417 0x2 /**< rdkafka will make a copy of the payload. \
4418 */
4419 #define RD_KAFKA_MSG_F_BLOCK \
4420 0x4 /**< Block produce*() on message queue full. \
4421 * WARNING: If a delivery report callback \
4422 * is used the application MUST \
4423 * call rd_kafka_poll() (or equiv.) \
4424 * to make sure delivered messages \
4425 * are drained from the internal \
4426 * delivery report queue. \
4427 * Failure to do so will result \
4428 * in indefinitely blocking on \
4429 * the produce() call when the \
4430 * message queue is full. */
4431 #define RD_KAFKA_MSG_F_PARTITION \
4432 0x8 /**< produce_batch() will honor \
4433 * per-message partition. */
4434
4435
4436
4437 /**
4438 * @brief Produce and send a single message to broker.
4439 *
4440 * \p rkt is the target topic which must have been previously created with
4441 * `rd_kafka_topic_new()`.
4442 *
4443 * `rd_kafka_produce()` is an asynchronous non-blocking API.
4444 * See `rd_kafka_conf_set_dr_msg_cb` on how to setup a callback to be called
4445 * once the delivery status (success or failure) is known. The delivery report
4446 * is triggered by the application calling `rd_kafka_poll()` (at regular
4447 * intervals) or `rd_kafka_flush()` (at termination).
4448 *
4449 * Since producing is asynchronous, you should call `rd_kafka_flush()` before
4450 * you destroy the producer. Otherwise, any outstanding messages will be
4451 * silently discarded.
4452 *
4453 * When temporary errors occur, librdkafka automatically retries to produce the
4454 * messages. Retries are triggered after retry.backoff.ms and when the
4455 * leader broker for the given partition is available. Otherwise, librdkafka
4456 * falls back to polling the topic metadata to monitor when a new leader is
4457 * elected (see the topic.metadata.refresh.fast.interval.ms and
4458 * topic.metadata.refresh.interval.ms configurations) and then performs a
4459 * retry. A delivery error will occur if the message could not be produced
4460 * within message.timeout.ms.
4461 *
4462 * See the "Message reliability" chapter in INTRODUCTION.md for more
4463 * information.
4464 *
4465 * \p partition is the target partition, either:
4466 * - RD_KAFKA_PARTITION_UA (unassigned) for
4467 * automatic partitioning using the topic's partitioner function, or
4468 * - a fixed partition (0..N)
4469 *
4470 * \p msgflags is zero or more of the following flags OR:ed together:
4471 * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if
4472 * \p queue.buffering.max.messages or
4473 * \p queue.buffering.max.kbytes are exceeded.
4474 * Messages are considered in-queue from the point
4475 * they are accepted by produce() until their corresponding delivery report
4476 * callback/event returns. It is thus a requirement to call rd_kafka_poll() (or
4477 * equiv.) from a separate thread when F_BLOCK is used. See WARNING on \c
4478 * RD_KAFKA_MSG_F_BLOCK above.
4479 *
4480 * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done
4481 * with it.
4482 * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the
4483 * \p payload pointer will not be used by rdkafka
4484 * after the call returns.
4485 * RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message
4486 * partition, either set manually or by the
4487 * configured partitioner.
4488 *
4489 * .._F_FREE and .._F_COPY are mutually exclusive. If neither of these are
4490 * set, the caller must ensure that the memory backing \p payload remains
4491 * valid and is not modified or reused until the delivery callback is
4492 * invoked. Other buffers passed to `rd_kafka_produce()` don't have this
4493 * restriction on reuse, i.e. the memory backing the key or the topic name
4494 * may be reused as soon as `rd_kafka_produce()` returns.
4495 *
4496 * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
4497 * the memory associated with the payload is still the caller's
4498 * responsibility.
4499 *
4500 * \p payload is the message payload of size \p len bytes.
4501 *
4502 * \p key is an optional message key of size \p keylen bytes, if non-NULL it
4503 * will be passed to the topic partitioner as well as be sent with the
4504 * message to the broker and passed on to the consumer.
4505 *
4506 * \p msg_opaque is an optional application-provided per-message opaque
4507 * pointer that will be provided in the message's delivery report callback
4508 * (\c dr_msg_cb or \c dr_cb) and the \c rd_kafka_message_t \c _private field.
4509 *
4510 * @remark on_send() and on_acknowledgement() interceptors may be called
4511 * from this function. on_acknowledgement() will only be called if the
4512 * message fails partitioning.
4513 *
4514 * @remark If the producer is transactional (\c transactional.id is configured)
4515 * producing is only allowed during an on-going transaction, namely
4516 * after rd_kafka_begin_transaction() has been called.
4517 *
4518 * @returns 0 on success or -1 on error in which case errno is set accordingly:
4519 * - ENOBUFS - maximum number of outstanding messages has been reached:
4520 * "queue.buffering.max.messages"
4521 * (RD_KAFKA_RESP_ERR__QUEUE_FULL)
4522 * - EMSGSIZE - message is larger than configured max size:
4523 * "messages.max.bytes".
4524 * (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
4525 * - ESRCH - requested \p partition is unknown in the Kafka cluster.
4526 * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
4527 * - ENOENT - topic is unknown in the Kafka cluster.
4528 * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
4529 * - ECANCELED - fatal error has been raised on producer, see
4530 * rd_kafka_fatal_error(),
4531 * (RD_KAFKA_RESP_ERR__FATAL).
4532 * - ENOEXEC - transactional state forbids producing
4533 * (RD_KAFKA_RESP_ERR__STATE)
4534 *
4535 * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code.
4536 */
4537 RD_EXPORT
4538 int rd_kafka_produce(rd_kafka_topic_t *rkt,
4539 int32_t partition,
4540 int msgflags,
4541 void *payload,
4542 size_t len,
4543 const void *key,
4544 size_t keylen,
4545 void *msg_opaque);
4546
4547
4548 /**
4549 * @brief Produce and send a single message to broker.
4550 *
4551 * The message is defined by a va-arg list using \c rd_kafka_vtype_t
4552 * tag tuples which must be terminated with a single \c RD_KAFKA_V_END.
4553 *
4554 * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as
4555 * described in rd_kafka_produce().
4556 * \c RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and
4557 * _V_HEADERS are mixed.
4558 *
4559 * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END
4560 */
4561 RD_EXPORT
4562 rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
4563
4564
4565 /**
4566 * @brief Produce and send a single message to broker.
4567 *
4568 * The message is defined by an array of \c rd_kafka_vu_t of
4569 * count \p cnt.
4570 *
4571 * @returns an error object on failure or NULL on success.
4572 * See rd_kafka_producev() for specific error codes.
4573 *
4574 * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END
4575 */
4576 RD_EXPORT
4577 rd_kafka_error_t *
4578 rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
4579
4580
4581 /**
4582 * @brief Produce multiple messages.
4583 *
4584 * If partition is RD_KAFKA_PARTITION_UA the configured partitioner will
4585 * be run for each message (slower), otherwise the messages will be enqueued
4586 * to the specified partition directly (faster).
4587 *
4588 * The messages are provided in the array \p rkmessages of count \p message_cnt
4589 * elements.
4590 * The \p partition and \p msgflags are used for all provided messages.
4591 *
4592 * Honoured \p rkmessages[] fields are:
4593 * - payload,len Message payload and length
4594 * - key,key_len Optional message key
4595 * - _private Message opaque pointer (msg_opaque)
4596 * - err Will be set according to success or failure, see
4597 * rd_kafka_produce() for possible error codes.
4598 * Application only needs to check for errors if
4599 * return value != \p message_cnt.
4600 *
4601 * @remark If \c RD_KAFKA_MSG_F_PARTITION is set in \p msgflags, the
4602 * \c .partition field of the \p rkmessages is used instead of
4603 * \p partition.
4604 *
4605 * @returns the number of messages successfully enqueued for producing.
4606 *
4607 * @remark This interface does NOT support setting message headers on
4608 * the provided \p rkmessages.
4609 */
4610 RD_EXPORT
4611 int rd_kafka_produce_batch(rd_kafka_topic_t *rkt,
4612 int32_t partition,
4613 int msgflags,
4614 rd_kafka_message_t *rkmessages,
4615 int message_cnt);
4616
4617
4618
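/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of producing a message with rd_kafka_producev() and serving
 * delivery reports. Assumes a producer handle `rk` and the hypothetical
 * topic "mytopic"; error handling abbreviated.
 *
 *   rd_kafka_resp_err_t err =
 *           rd_kafka_producev(rk,
 *                             RD_KAFKA_V_TOPIC("mytopic"),
 *                             RD_KAFKA_V_KEY("user42", 6),
 *                             RD_KAFKA_V_VALUE("hello", 5),
 *                             RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
 *                             RD_KAFKA_V_END);
 *   if (err)
 *           fprintf(stderr, "producev: %s\n", rd_kafka_err2str(err));
 *
 *   rd_kafka_poll(rk, 0); // serve delivery report callbacks, non-blocking
 */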
4619 /**
4620 * @brief Wait until all outstanding produce requests, et.al, are completed.
4621 * This should typically be done prior to destroying a producer instance
4622 * to make sure all queued and in-flight produce requests are completed
4623 * before terminating.
4624 *
4625 * @remark This function will call rd_kafka_poll() and thus trigger callbacks.
4626 *
4627 * @remark The \c linger.ms time will be ignored for the duration of the call,
4628 * queued messages will be sent to the broker as soon as possible.
4629 *
4630 * @remark If RD_KAFKA_EVENT_DR has been enabled
4631 * (through rd_kafka_conf_set_events()) this function will not call
4632 * rd_kafka_poll() but instead wait for the librdkafka-handled
4633 * message count to reach zero. This requires the application to
4634 * serve the event queue in a separate thread.
4635 * In this mode only messages are counted, not other types of
4636 * queued events.
4637 *
4638 * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all
4639 * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR
4640 *
4641 * @sa rd_kafka_outq_len()
4642 */
4643 RD_EXPORT
4644 rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
4645
4646
4647
4648 /**
4649 * @brief Purge messages currently handled by the producer instance.
4650 *
4651 * @param rk Client instance.
4652 * @param purge_flags Tells which messages to purge and how.
4653 *
4654 * The application will need to call rd_kafka_poll() or rd_kafka_flush()
4655 * afterwards to serve the delivery report callbacks of the purged messages.
4656 *
4657 * Messages purged from internal queues fail with the delivery report
4658 * error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that
4659 * are in-flight to or from the broker will fail with the error code set to
4660 * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT.
4661 *
4662 * @warning Purging messages that are in-flight to or from the broker
4663 * will ignore any subsequent acknowledgement for these messages
4664 * received from the broker, effectively making it impossible
4665 * for the application to know if the messages were successfully
4666 * produced or not. This may result in duplicate messages if the
4667 * application retries these messages at a later time.
4668 *
4669 * @remark This call may block for a short time while background thread
4670 * queues are purged.
4671 *
4672 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
4673 * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p purge flags are invalid
4674 * or unknown,
4675 * RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer
4676 * client instance.
4677 */
4678 RD_EXPORT
4679 rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
4680
4681
4682 /**
4683 * @brief Flags for rd_kafka_purge()
4684 */
4685
4686 /*!
4687 * Purge messages in internal queues.
4688 */
4689 #define RD_KAFKA_PURGE_F_QUEUE 0x1
4690
4691 /*!
4692 * Purge messages in-flight to or from the broker.
4693 * Purging these messages will void any future acknowledgements from the
4694 * broker, making it impossible for the application to know if these
4695 * messages were successfully delivered or not.
4696 * Retrying these messages may lead to duplicates.
4697 */
4698 #define RD_KAFKA_PURGE_F_INFLIGHT 0x2
4699
4700
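/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of a producer termination sequence: flush with a bounded
 * timeout, purge whatever remains, and serve the resulting delivery
 * reports before destroying the handle. Assumes a producer handle `rk`.
 *
 *   if (rd_kafka_flush(rk, 10000) == RD_KAFKA_RESP_ERR__TIMED_OUT) {
 *           // Give up on undelivered messages; their delivery reports
 *           // will fail with _PURGE_QUEUE or _PURGE_INFLIGHT.
 *           rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE |
 *                              RD_KAFKA_PURGE_F_INFLIGHT);
 *           rd_kafka_poll(rk, 100); // serve the purge delivery reports
 *   }
 *   rd_kafka_destroy(rk);
 */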
4701 /*!
4702 * Don't wait for background thread queue purging to finish.
4703 */
4704 #define RD_KAFKA_PURGE_F_NON_BLOCKING 0x4
4705
4706
4707 /**@}*/
4708
4709
4710 /**
4711 * @name Metadata API
4712 * @{
4713 *
4714 *
4715 */
4716
4717
4718 /**
4719 * @brief Broker information
4720 */
4721 typedef struct rd_kafka_metadata_broker {
4722 int32_t id; /**< Broker Id */
4723 char *host; /**< Broker hostname */
4724 int port; /**< Broker listening port */
4725 } rd_kafka_metadata_broker_t;
4726
4727 /**
4728 * @brief Partition information
4729 */
4730 typedef struct rd_kafka_metadata_partition {
4731 int32_t id; /**< Partition Id */
4732 rd_kafka_resp_err_t err; /**< Partition error reported by broker */
4733 int32_t leader; /**< Leader broker */
4734 int replica_cnt; /**< Number of brokers in \p replicas */
4735 int32_t *replicas; /**< Replica brokers */
4736 int isr_cnt; /**< Number of ISR brokers in \p isrs */
4737 int32_t *isrs; /**< In-Sync-Replica brokers */
4738 } rd_kafka_metadata_partition_t;
4739
4740 /**
4741 * @brief Topic information
4742 */
4743 typedef struct rd_kafka_metadata_topic {
4744 char *topic; /**< Topic name */
4745 int partition_cnt; /**< Number of partitions in \p partitions*/
4746 struct rd_kafka_metadata_partition *partitions; /**< Partitions */
4747 rd_kafka_resp_err_t err; /**< Topic error reported by broker */
4748 } rd_kafka_metadata_topic_t;
4749
4750
4751 /**
4752 * @brief Metadata container
4753 */
4754 typedef struct rd_kafka_metadata {
4755 int broker_cnt; /**< Number of brokers in \p brokers */
4756 struct rd_kafka_metadata_broker *brokers; /**< Brokers */
4757
4758 int topic_cnt; /**< Number of topics in \p topics */
4759 struct rd_kafka_metadata_topic *topics; /**< Topics */
4760
4761 int32_t orig_broker_id; /**< Broker originating this metadata */
4762 char *orig_broker_name; /**< Name of originating broker */
4763 } rd_kafka_metadata_t;
4764
4765
4766 /**
4767 * @brief Request Metadata from broker.
4768 *
4769 * Parameters:
4770 * - \p all_topics if non-zero: request info about all topics in cluster,
4771 * if zero: only request info about locally known topics.
4772 * - \p only_rkt only request info about this topic
4773 * - \p metadatap pointer to hold metadata result.
4774 * The \p *metadatap pointer must be released
4775 * with rd_kafka_metadata_destroy().
4776 * - \p timeout_ms maximum response time before failing.
4777 *
4778 * @remark Consumer: If \p all_topics is non-zero the Metadata response
4779 * information may trigger a re-join if any subscribed topics
4780 * have changed partition count or existence state.
4781 *
4782 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case \p *metadatap
4783 * will be set), else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or
4784 * other error code on error.
4785 */
4786 RD_EXPORT
4787 rd_kafka_resp_err_t
4788 rd_kafka_metadata(rd_kafka_t *rk,
4789 int all_topics,
4790 rd_kafka_topic_t *only_rkt,
4791 const struct rd_kafka_metadata **metadatap,
4792 int timeout_ms);
4793
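/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * header) of requesting cluster metadata and walking the topic and
 * partition arrays. Assumes a client handle `rk`; error handling
 * abbreviated.
 *
 *   const struct rd_kafka_metadata *md;
 *   // all_topics=1: fetch info for every topic in the cluster
 *   if (!rd_kafka_metadata(rk, 1, NULL, &md, 5000)) {
 *           for (int t = 0; t < md->topic_cnt; t++) {
 *                   const rd_kafka_metadata_topic_t *mt = &md->topics[t];
 *                   printf("%s: %d partition(s)\n",
 *                          mt->topic, mt->partition_cnt);
 *                   for (int p = 0; p < mt->partition_cnt; p++)
 *                           printf("  [%d] leader broker %d\n",
 *                                  mt->partitions[p].id,
 *                                  mt->partitions[p].leader);
 *           }
 *           rd_kafka_metadata_destroy(md);
 *   }
 */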
4796 */
4797 RD_EXPORT
4798 void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
4799
4800
4801 /**@}*/
4802
4803
4804
4805 /**
4806 * @name Client group information
4807 * @{
4808 *
4809 *
4810 */
4811
4812
4813 /**
4814 * @brief Group member information
4815 *
4816 * For more information on \p member_metadata format, see
4817 * https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI
4818 *
4819 */
4820 struct rd_kafka_group_member_info {
4821 char *member_id; /**< Member id (generated by broker) */
4822 char *client_id; /**< Client's \p client.id */
4823 char *client_host; /**< Client's hostname */
4824 void *member_metadata; /**< Member metadata (binary),
4825 * format depends on \p protocol_type. */
4826 int member_metadata_size; /**< Member metadata size in bytes */
4827 void *member_assignment; /**< Member assignment (binary),
4828 * format depends on \p protocol_type. */
4829 int member_assignment_size; /**< Member assignment size in bytes */
4830 };
4831
4832 /**
4833 * @brief Group information
4834 */
4835 struct rd_kafka_group_info {
4836 struct rd_kafka_metadata_broker broker; /**< Originating broker info */
4837 char *group; /**< Group name */
4838 rd_kafka_resp_err_t err; /**< Broker-originated error */
4839 char *state; /**< Group state */
4840 char *protocol_type; /**< Group protocol type */
4841 char *protocol; /**< Group protocol */
4842 struct rd_kafka_group_member_info *members; /**< Group members */
4843 int member_cnt; /**< Group member count */
4844 };
4845
4846 /**
4847 * @brief List of groups
4848 *
4849 * @sa rd_kafka_group_list_destroy() to release list memory.
4850 */
4851 struct rd_kafka_group_list {
4852 struct rd_kafka_group_info *groups; /**< Groups */
4853 int group_cnt; /**< Group count */
4854 };
4855
4856
4857 /**
4858 * @brief List and describe client groups in cluster.
4859 *
4860 * \p group is an optional group name to describe, otherwise (\p NULL) all
4861 * groups are returned.
4862 *
4863 * \p timeout_ms is the (approximate) maximum time to wait for response
4864 * from brokers and must be a positive value.
4865 *
4866 * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success and \p grplistp is
4867 * updated to point to a newly allocated list of groups.
4868 * \c RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded
4869 * in time but at least one group is returned in \p grplistp.
4870 * \c RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the
4871 * given timeframe but not all brokers have yet responded, or
4872 * if the list of brokers in the cluster could not be obtained within
4873 * the given timeframe.
4874 * \c RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found.
4875 * Other error codes may also be returned from the request layer.
4876 *
4877 * The \p grplistp remains untouched if any error code is returned,
4878 * with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves
4879 * as RD_KAFKA_RESP_ERR_NO_ERROR (success) but with an incomplete
4880 * group list.
4881 *
4882 * @sa Use rd_kafka_group_list_destroy() to release list memory.
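 *
 * Illustrative sketch (assumes a client handle \c rk; error handling
 * trimmed to the two success-like codes documented above):
 * @code
 * const struct rd_kafka_group_list *grplist;
 * rd_kafka_resp_err_t err = rd_kafka_list_groups(rk, NULL, &grplist, 5000);
 * if (err == RD_KAFKA_RESP_ERR_NO_ERROR ||
 *     err == RD_KAFKA_RESP_ERR__PARTIAL) {
 *         int i;
 *         for (i = 0; i < grplist->group_cnt; i++)
 *                 printf("%s (%s)\n",
 *                        grplist->groups[i].group,
 *                        grplist->groups[i].state);
 *         rd_kafka_group_list_destroy(grplist);
 * }
 * @endcode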
4883 */
4884 RD_EXPORT
4885 rd_kafka_resp_err_t
4886 rd_kafka_list_groups(rd_kafka_t *rk,
4887 const char *group,
4888 const struct rd_kafka_group_list **grplistp,
4889 int timeout_ms);
4890
4891 /**
4892 * @brief Release list memory
4893 */
4894 RD_EXPORT
4895 void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
4896
4897
4898 /**@}*/
4899
4900
4901
4902 /**
4903 * @name Miscellaneous APIs
4904 * @{
4905 *
4906 */
4907
4908
4909 /**
4910 * @brief Adds one or more brokers to the kafka handle's list of initial
4911 * bootstrap brokers.
4912 *
4913 * Additional brokers will be discovered automatically as soon as rdkafka
4914 * connects to a broker by querying the broker metadata.
4915 *
4916 * If a broker name resolves to multiple addresses (and possibly
4917 * address families) all will be used for connection attempts in
4918 * round-robin fashion.
4919 *
4920 * \p brokerlist is a comma-separated list of brokers in the format:
4921 * \c \<broker1\>,\<broker2\>,..
4922 * Where each broker is in either the host or URL based format:
4923 * \c \<host\>[:\<port\>]
4924 * \c \<proto\>://\<host\>[:\<port\>]
4925 * \c \<proto\> is either \c PLAINTEXT, \c SSL, \c SASL_PLAINTEXT or \c SASL_SSL.
4926 * The two formats can be mixed but ultimately the value of the
4927 * `security.protocol` config property decides what brokers are allowed.
4928 *
4929 * Example:
4930 * brokerlist = "broker1:10000,broker2"
4931 * brokerlist = "SSL://broker3:9000,ssl://broker2"
4932 *
4933 * @returns the number of brokers successfully added.
4934 *
4935 * @remark Brokers may also be defined with the \c metadata.broker.list or
4936 * \c bootstrap.servers configuration property (preferred method).
4937 *
4938 * @deprecated Set bootstrap servers with the \c bootstrap.servers
4939 * configuration property.
4940 */
4941 RD_EXPORT
4942 int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
4943
4944
4945
4946 /**
4947 * @brief Set logger function.
4948 *
4949 * The default is to print to stderr, but a syslog logger is also available,
4950 * see rd_kafka_log_(print|syslog) for the builtin alternatives.
4951 * Alternatively the application may provide its own logger callback,
4952 * or pass \p func as NULL to disable logging.
4953 *
4954 * @deprecated Use rd_kafka_conf_set_log_cb()
4955 *
4956 * @remark \p rk may be passed as NULL in the callback.
4957 */
4958 RD_EXPORT RD_DEPRECATED void
4959 rd_kafka_set_logger(rd_kafka_t *rk,
4960 void (*func)(const rd_kafka_t *rk,
4961 int level,
4962 const char *fac,
4963 const char *buf));
4964
4965
4966 /**
4967 * @brief Specifies the maximum logging level emitted by
4968 * internal kafka logging and debugging.
4969 *
4970 * @deprecated Set the \c "log_level" configuration property instead.
4971 *
4972 * @remark If the \p \"debug\" configuration property is set the log level is
4973 * automatically adjusted to \c LOG_DEBUG (7).
4974 */
4975 RD_EXPORT
4976 void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
4977
4978
4979 /**
4980 * @brief Builtin (default) log sink: print to stderr
4981 */
4982 RD_EXPORT
4983 void rd_kafka_log_print(const rd_kafka_t *rk,
4984 int level,
4985 const char *fac,
4986 const char *buf);
4987
4988
4989 /**
4990 * @brief Builtin log sink: print to syslog.
4991 * @remark This logger is only available if librdkafka was built
4992 * with syslog support.
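 *
 * Typically installed through the log callback on the configuration object
 * (illustrative sketch, assuming a \c rd_kafka_conf_t object \c conf created
 * elsewhere):
 * @code
 * rd_kafka_conf_set_log_cb(conf, rd_kafka_log_syslog);
 * @endcode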
4993 */
4994 RD_EXPORT
4995 void rd_kafka_log_syslog(const rd_kafka_t *rk,
4996 int level,
4997 const char *fac,
4998 const char *buf);
4999
5000
5001 /**
5002 * @brief Returns the current out queue length.
5003 *
5004 * The out queue length is the sum of:
5005 * - number of messages waiting to be sent to, or acknowledged by,
5006 * the broker.
5007 * - number of delivery reports (e.g., dr_msg_cb) waiting to be served
5008 * by rd_kafka_poll() or rd_kafka_flush().
5009 * - number of callbacks (e.g., error_cb, stats_cb, etc) waiting to be
5010 * served by rd_kafka_poll(), rd_kafka_consumer_poll() or rd_kafka_flush().
5011 * - number of events waiting to be served by background_event_cb() in
5012 * the background queue (see rd_kafka_conf_set_background_event_cb).
5013 *
5014 * An application should wait for the return value of this function to reach
5015 * zero before terminating to make sure outstanding messages,
5016 * requests (such as offset commits), callbacks and events are fully processed.
5017 * See rd_kafka_flush().
5018 *
5019 * @returns number of messages and events waiting in queues.
5020 *
5021 * @sa rd_kafka_flush()
5022 */
5023 RD_EXPORT
5024 int rd_kafka_outq_len(rd_kafka_t *rk);
5025
5026
5027
5028 /**
5029 * @brief Dumps rdkafka's internal state for handle \p rk to stream \p fp
5030 *
5031 * This is only useful for debugging rdkafka, showing state and statistics
5032 * for brokers, topics, partitions, etc.
5033 */
5034 RD_EXPORT
5035 void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
5036
5037
5038
5039 /**
5040 * @brief Retrieve the current number of threads in use by librdkafka.
5041 *
5042 * Used by regression tests.
5043 */
5044 RD_EXPORT
5045 int rd_kafka_thread_cnt(void);
5046
5047
5048 /**
5049 * @enum rd_kafka_thread_type_t
5050 *
5051 * @brief librdkafka internal thread type.
5052 *
5053 * @sa rd_kafka_interceptor_add_on_thread_start()
5054 */
5055 typedef enum rd_kafka_thread_type_t {
5056 RD_KAFKA_THREAD_MAIN, /**< librdkafka's internal main thread */
5057 RD_KAFKA_THREAD_BACKGROUND, /**< Background thread (if enabled) */
5058 RD_KAFKA_THREAD_BROKER /**< Per-broker thread */
5059 } rd_kafka_thread_type_t;
5060
5061
5062 /**
5063 * @brief Wait for all rd_kafka_t objects to be destroyed.
5064 *
5065 * Returns 0 if all kafka objects are now destroyed, or -1 if the
5066 * timeout was reached.
5067 *
5068 * @remark This function is deprecated.
5069 */
5070 RD_EXPORT
5071 int rd_kafka_wait_destroyed(int timeout_ms);
5072
5073
5074 /**
5075 * @brief Run librdkafka's built-in unit-tests.
5076 *
5077 * @returns the number of failures, or 0 if all tests passed.
5078 */
5079 RD_EXPORT
5080 int rd_kafka_unittest(void);
5081
5082
5083 /**@}*/
5084
5085
5086
5087 /**
5088 * @name Experimental APIs
5089 * @{
5090 */
5091
5092 /**
5093 * @brief Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's
5094 * queue (rd_kafka_consumer_poll()).
5095 *
5096 * @warning It is not permitted to call rd_kafka_poll() after directing the
5097 * main queue with rd_kafka_poll_set_consumer().
5098 */
5099 RD_EXPORT
5100 rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
5101
5102
5103 /**@}*/
5104
5105 /**
5106 * @name Event interface
5107 *
5108 * @brief The event API provides an alternative pollable non-callback interface
5109 * to librdkafka's message and event queues.
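 *
 * A minimal polling loop sketch (illustrative only; assumes a queue handle
 * \c rkqu obtained elsewhere, e.g. with rd_kafka_queue_get_main()):
 * @code
 * rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, 1000);
 * if (rkev) {
 *         switch (rd_kafka_event_type(rkev)) {
 *         case RD_KAFKA_EVENT_ERROR:
 *                 fprintf(stderr, "%s\n", rd_kafka_event_error_string(rkev));
 *                 break;
 *         default: // other event types ignored in this sketch
 *                 break;
 *         }
 *         rd_kafka_event_destroy(rkev);
 * }
 * @endcode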
5110 *
5111 * @{
5112 */
5113
5114
5115 /**
5116 * @brief Event types
5117 */
5118 typedef int rd_kafka_event_type_t;
5119 #define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */
5120 #define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */
5121 #define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */
5122 #define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */
5123 #define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */
5124 #define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */
5125 #define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */
5126 #define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */
5127 #define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */
5128 #define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */
5129 #define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT \
5130 102 /**< CreatePartitions_result_t */
5131 #define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */
5132 #define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT \
5133 104 /**< DescribeConfigs_result_t */
5134 #define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */
5135 #define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */
5136 /** DeleteConsumerGroupOffsets_result_t */
5137 #define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107
5138 /** SASL/OAUTHBEARER token needs to be refreshed */
5139 #define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100
5140 #define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */
5141 #define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */
5142 #define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */
5143 #define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */
5144
5145 /**
5146 * @returns the event type for the given event.
5147 *
5148 * @remark As a convenience it is okay to pass \p rkev as NULL in which case
5149 * RD_KAFKA_EVENT_NONE is returned.
5150 */
5151 RD_EXPORT
5152 rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
5153
5154 /**
5155 * @returns the event type's name for the given event.
5156 *
5157 * @remark As a convenience it is okay to pass \p rkev as NULL in which case
5158 * the name for RD_KAFKA_EVENT_NONE is returned.
5159 */
5160 RD_EXPORT
5161 const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
5162
5163
5164 /**
5165 * @brief Destroy an event.
5166 *
5167 * @remark Any references to this event, such as extracted messages,
5168 * will not be usable after this call.
5169 *
5170 * @remark As a convenience it is okay to pass \p rkev as NULL in which case
5171 * no action is performed.
5172 */
5173 RD_EXPORT
5174 void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
5175
5176
5177 /**
5178 * @returns the next message from an event.
5179 *
5180 * Call repeatedly until it returns NULL.
5181 *
5182 * Event types:
5183 * - RD_KAFKA_EVENT_FETCH (1 message)
5184 * - RD_KAFKA_EVENT_DR (>=1 message(s))
5185 *
5186 * @remark The returned message(s) MUST NOT be
5187 * freed with rd_kafka_message_destroy().
5188 *
5189 * @remark on_consume() interceptor may be called
5190 * from this function prior to passing the message to the application.
5191 */
5192 RD_EXPORT
5193 const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
5194
5195
5196 /**
5197 * @brief Extracts \p size message(s) from the event into the
5198 * pre-allocated array \p rkmessages.
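 *
 * Illustrative extraction sketch (assumes \c rkev is an event of one of
 * the message-carrying types listed below):
 * @code
 * const rd_kafka_message_t *rkmessages[128];
 * size_t cnt = rd_kafka_event_message_array(rkev, rkmessages, 128);
 * @endcode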
5199 *
5200 * Event types:
5201 * - RD_KAFKA_EVENT_FETCH (1 message)
5202 * - RD_KAFKA_EVENT_DR (>=1 message(s))
5203 *
5204 * @returns the number of messages extracted.
5205 *
5206 * @remark on_consume() interceptor may be called
5207 * from this function prior to passing the message to the application.
5208 */
5209 RD_EXPORT
5210 size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev,
5211 const rd_kafka_message_t **rkmessages,
5212 size_t size);
5213
5214
5215 /**
5216 * @returns the number of remaining messages in the event.
5217 *
5218 * Event types:
5219 * - RD_KAFKA_EVENT_FETCH (1 message)
5220 * - RD_KAFKA_EVENT_DR (>=1 message(s))
5221 */
5222 RD_EXPORT
5223 size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
5224
5225
5226 /**
5227 * @returns the associated configuration string for the event, or NULL
5228 * if the configuration property is not set or if
5229 * not applicable for the given event type.
5230 *
5231 * The returned memory is read-only and its lifetime is the same as the
5232 * event object.
5233 *
5234 * Event types:
5235 * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config
5236 */
5237 RD_EXPORT
5238 const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
5239
5240
5241 /**
5242 * @returns the error code for the event.
5243 *
5244 * Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error.
5245 *
5246 * Event types:
5247 * - all
5248 */
5249 RD_EXPORT
5250 rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
5251
5252
5253 /**
5254 * @returns the error string (if any).
5255 * An application should check that rd_kafka_event_error() returns
5256 * non-zero before calling this function.
5257 *
5258 * Event types:
5259 * - all
5260 */
5261 RD_EXPORT
5262 const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
5263
5264
5265 /**
5266 * @returns 1 if the error is a fatal error, else 0.
5267 *
5268 * Event types:
5269 * - RD_KAFKA_EVENT_ERROR
5270 *
5271 * @sa rd_kafka_fatal_error()
5272 */
5273 RD_EXPORT
5274 int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
5275
5276
5277 /**
5278 * @returns the event opaque (if any) as passed to rd_kafka_commit() (et al.) or
5279 * rd_kafka_AdminOptions_set_opaque(), depending on event type.
5280 *
5281 * Event types:
5282 * - RD_KAFKA_EVENT_OFFSET_COMMIT
5283 * - RD_KAFKA_EVENT_CREATETOPICS_RESULT
5284 * - RD_KAFKA_EVENT_DELETETOPICS_RESULT
5285 * - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
5286 * - RD_KAFKA_EVENT_CREATEACLS_RESULT
5287 * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
5288 * - RD_KAFKA_EVENT_DELETEACLS_RESULT
5289 * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
5290 * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
5291 * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT
5292 * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
5293 * - RD_KAFKA_EVENT_DELETERECORDS_RESULT
5294 */
5295 RD_EXPORT
5296 void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
5297
5298
5299 /**
5300 * @brief Extract log message from the event.
5301 *
5302 * Event types:
5303 * - RD_KAFKA_EVENT_LOG
5304 *
5305 * @returns 0 on success or -1 if unsupported event type.
5306 */
5307 RD_EXPORT
5308 int rd_kafka_event_log(rd_kafka_event_t *rkev,
5309 const char **fac,
5310 const char **str,
5311 int *level);
5312
5313
5314 /**
5315 * @brief Extract log debug context from event.
5316 *
5317 * Event types:
5318 * - RD_KAFKA_EVENT_LOG
5319 *
5320 * @param rkev the event to extract data from.
5321 * @param dst destination string for comma separated list.
5322 * @param dstsize size of provided dst buffer.
5323 * @returns 0 on success or -1 if unsupported event type.
5324 */
5325 RD_EXPORT
5326 int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev,
5327 char *dst,
5328 size_t dstsize);
5329
5330
5331 /**
5332 * @brief Extract stats from the event.
5333 *
5334 * Event types:
5335 * - RD_KAFKA_EVENT_STATS
5336 *
5337 * @returns stats json string.
5338 *
5339 * @remark the returned string will be freed automatically along with the event
5340 * object
5341 *
5342 */
5343 RD_EXPORT
5344 const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
5345
5346
5347 /**
5348 * @returns the topic partition list from the event.
5349 *
5350 * @remark The list MUST NOT be freed with
5351 * rd_kafka_topic_partition_list_destroy()
5352 *
5353 * Event types:
5354 * - RD_KAFKA_EVENT_REBALANCE
5355 * - RD_KAFKA_EVENT_OFFSET_COMMIT
5356 */
5357 RD_EXPORT rd_kafka_topic_partition_list_t *
5358 rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
5359
5360
5361 /**
5362 * @returns a newly allocated topic_partition container, if applicable for the
5363 * event type, else NULL.
5364 *
5365 * @remark The returned pointer MUST be freed with
5366 * rd_kafka_topic_partition_destroy().
5367 *
5368 * Event types:
5369 * RD_KAFKA_EVENT_ERROR (for partition level errors)
5370 */
5371 RD_EXPORT rd_kafka_topic_partition_t *
5372 rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
5373
5374
5375 /*! CreateTopics result type */
5376 typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
5377 /*! DeleteTopics result type */
5378 typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
5379 /*! CreateAcls result type */
5380 typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
5381 /*! DescribeAcls result type */
5382 typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
5383 /*! DeleteAcls result type */
5384 typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
5385 /*! CreatePartitions result type */
5386 typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
5387 /*! AlterConfigs result type */
5388 typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
5389 /*! DescribeConfigs result type */
5390 typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
5391 /*! DeleteRecords result type */
5392 typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
5393 /*! DeleteGroups result type */
5394 typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
5395 /*! DeleteConsumerGroupOffsets result type */
5396 typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
5397
5398 /**
5399 * @brief Get CreateTopics result.
5400 *
5401 * @returns the result of a CreateTopics request, or NULL if event is of
5402 * different type.
5403 *
5404 * Event types:
5405 * RD_KAFKA_EVENT_CREATETOPICS_RESULT
5406 */
5407 RD_EXPORT const rd_kafka_CreateTopics_result_t *
5408 rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
5409
5410 /**
5411 * @brief Get DeleteTopics result.
5412 *
5413 * @returns the result of a DeleteTopics request, or NULL if event is of
5414 * different type.
5415 *
5416 * Event types:
5417 * RD_KAFKA_EVENT_DELETETOPICS_RESULT
5418 */
5419 RD_EXPORT const rd_kafka_DeleteTopics_result_t *
5420 rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
5421
5422 /**
5423 * @brief Get CreatePartitions result.
5424 *
5425 * @returns the result of a CreatePartitions request, or NULL if event is of
5426 * different type.
5427 *
5428 * Event types:
5429 * RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
5430 */
5431 RD_EXPORT const rd_kafka_CreatePartitions_result_t *
5432 rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
5433
5434 /**
5435 * @brief Get AlterConfigs result.
5436 *
5437 * @returns the result of an AlterConfigs request, or NULL if event is of
5438 * different type.
5439 *
5440 * Event types:
5441 * RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
5442 */
5443 RD_EXPORT const rd_kafka_AlterConfigs_result_t *
5444 rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
5445
5446 /**
5447 * @brief Get DescribeConfigs result.
5448 *
5449 * @returns the result of a DescribeConfigs request, or NULL if event is of
5450 * different type.
5451 *
5452 * Event types:
5453 * RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
5454 */
5455 RD_EXPORT const rd_kafka_DescribeConfigs_result_t *
5456 rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
5457
5458 /**
5459 * @returns the result of a DeleteRecords request, or NULL if event is of
5460 * different type.
5461 *
5462 * Event types:
5463 * RD_KAFKA_EVENT_DELETERECORDS_RESULT
5464 */
5465 RD_EXPORT const rd_kafka_DeleteRecords_result_t *
5466 rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
5467
5468 /**
5469 * @brief Get DeleteGroups result.
5470 *
5471 * @returns the result of a DeleteGroups request, or NULL if event is of
5472 * different type.
5473 *
5474 * Event types:
5475 * RD_KAFKA_EVENT_DELETEGROUPS_RESULT
5476 */
5477 RD_EXPORT const rd_kafka_DeleteGroups_result_t *
5478 rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
5479
5480 /**
5481 * @brief Get DeleteConsumerGroupOffsets result.
5482 *
5483 * @returns the result of a DeleteConsumerGroupOffsets request, or NULL if
5484 * event is of different type.
5485 *
5486 * Event types:
5487 * RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
5488 */
5489 RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t *
5490 rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
5491
5492 /**
5493 * @returns the result of a CreateAcls request, or NULL if event is of
5494 * different type.
5495 *
5496 * Event types:
5497 * RD_KAFKA_EVENT_CREATEACLS_RESULT
5498 */
5499 RD_EXPORT const rd_kafka_CreateAcls_result_t *
5500 rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
5501
5502 /**
5503 * @returns the result of a DescribeAcls request, or NULL if event is of
5504 * different type.
5505 *
5506 * Event types:
5507 * RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
5508 */
5509 RD_EXPORT const rd_kafka_DescribeAcls_result_t *
5510 rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
5511
5512 /**
5513 * @returns the result of a DeleteAcls request, or NULL if event is of
5514 * different type.
5515 *
5516 * Event types:
5517 * RD_KAFKA_EVENT_DELETEACLS_RESULT
5518 */
5519 RD_EXPORT const rd_kafka_DeleteAcls_result_t *
5520 rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
5521
5522 /**
5523 * @brief Poll a queue for an event for max \p timeout_ms.
5524 *
5525 * @returns an event, or NULL.
5526 *
5527 * @remark Use rd_kafka_event_destroy() to free the event.
5528 *
5529 * @sa rd_kafka_conf_set_background_event_cb()
5530 */
5531 RD_EXPORT
5532 rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
5533
5534 /**
5535 * @brief Poll a queue for events served through callbacks for max \p
5536 * timeout_ms.
5537 *
5538 * @returns the number of events served.
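 *
 * Illustrative serve loop (sketch; assumes the relevant callbacks were
 * registered on the configuration before rd_kafka_new(), and that \c run
 * is the application's own loop flag):
 * @code
 * while (run)
 *         rd_kafka_queue_poll_callback(rkqu, 1000);
 * @endcode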
5539 *
5540 * @remark This API must only be used for queues with callbacks registered
5541 * for all expected event types. E.g., not a message queue.
5542 *
5543 * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering
5544 * event callbacks from a librdkafka-managed background thread.
5545 *
5546 * @sa rd_kafka_conf_set_background_event_cb()
5547 */
5548 RD_EXPORT
5549 int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
5550
5551
5552 /**@}*/
5553
5554
5555 /**
5556 * @name Plugin interface
5557 *
5558 * @brief A plugin interface that allows external runtime-loaded libraries
5559 * to integrate with a client instance without modifications to
5560 * the application code.
5561 *
5562 * Plugins are loaded when referenced through the `plugin.library.paths`
5563 * configuration property and operate on the \c rd_kafka_conf_t
5564 * object prior to \c rd_kafka_t instance creation.
5565 *
5566 * @warning Plugins require the application to link librdkafka dynamically
5567 * and not statically. Failure to do so will lead to missing symbols
5568 * or finding symbols in another librdkafka library than the one
5569 * the application was linked with.
5570 */
5571
5572
5573 /**
5574 * @brief Plugin's configuration initializer method called each time the
5575 * library is referenced from configuration (even if previously loaded by
5576 * another client instance).
5577 *
5578 * @remark This method MUST be implemented by plugins and have the symbol name
5579 * \c conf_init
5580 *
5581 * @param conf Configuration set up to this point.
5582 * @param plug_opaquep Plugin can set this pointer to a per-configuration
5583 * opaque pointer.
5584 * @param errstr String buffer of size \p errstr_size where the plugin must
5585 * write a human readable error string in case the initializer
5586 * fails (returns non-zero).
5587 * @param errstr_size Maximum space (including \0) in \p errstr.
5588 *
5589 * @remark A plugin may add an on_conf_destroy() interceptor to clean up
5590 * plugin-specific resources created in the plugin's conf_init() method.
5591 *
5592 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
5593 */
5594 typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)(
5595 rd_kafka_conf_t *conf,
5596 void **plug_opaquep,
5597 char *errstr,
5598 size_t errstr_size);
5599
5600 /**@}*/
5601
5602
5603
5604 /**
5605 * @name Interceptors
5606 *
5607 * @{
5608 *
5609 * @brief A callback interface that allows message interception for both
5610 * producer and consumer data pipelines.
5611 *
5612 * Except for the on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy()
5613 * interceptors, interceptors are added to the
5614 * newly created rd_kafka_t client instance. These interceptors MUST only
5615 * be added from on_new() and MUST NOT be added after rd_kafka_new() returns.
5616 *
5617 * The on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy() interceptors
5618 * are added to the configuration object which is later passed to
5619 * rd_kafka_new() where on_new() is called to allow addition of
5620 * other interceptors.
5621 *
5622 * Each interceptor reference consists of a display name (ic_name),
5623 * a callback function, and an application-specified opaque value that is
5624 * passed as-is to the callback.
5625 * The ic_name must be unique for the interceptor implementation and is used
5626 * to reject duplicate interceptor methods.
5627 *
5628 * Any number of interceptors can be added and they are called in the order
5629 * they were added, unless otherwise noted.
5630 * The lists of registered interceptor methods are referred to as
5631 * interceptor chains.
5632 *
5633 * @remark Contrary to the Java client the librdkafka interceptor interface
5634 * does not support message key and value modification.
5635 * Message mutability is discouraged in the Java client and the
5636 * combination of serializers and headers covers most use-cases.
5637 *
5638 * @remark Interceptors are NOT copied to the new configuration on
5639 * rd_kafka_conf_dup() since it would be hard for interceptors to
5640 * track usage of the interceptor's opaque value.
5641 * An interceptor should rely on the plugin, which will be copied
5642 * in rd_kafka_conf_dup(), to set up the initial interceptors.
5643 * An interceptor should implement the on_conf_dup() method
5644 * to manually set up its internal configuration on the newly created
5645 * configuration object that is being copied-to based on the
5646 * interceptor-specific configuration properties.
5647 * conf_dup() should thus be treated the same as conf_init().
5648 *
5649 * @remark Interceptors are keyed by the interceptor type (on_..()), the
5650 * interceptor name (ic_name) and the interceptor method function.
5651 * Duplicates are not allowed and the .._add_on_..() method will
5652 * return RD_KAFKA_RESP_ERR__CONFLICT if attempting to add a duplicate
5653 * method.
5654 * The only exception is on_conf_destroy() which may be added multiple
5655 * times by the same interceptor to allow proper cleanup of
5656 * interceptor configuration state.
5657 */
5658
5659
5660 /**
5661 * @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order
5662 * the interceptors were added.
5663 *
5664 * @param conf Configuration object.
5665 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5666 * @param name The configuration property to set.
5667 * @param val The configuration value to set, or NULL for reverting to default,
5668 * in which case the previous value should be freed.
5669 * @param errstr A human readable error string in case the interceptor fails.
5670 * @param errstr_size Maximum space (including \0) in \p errstr.
5671 *
5672 * @returns RD_KAFKA_CONF_OK if the property was known and successfully
5673 * handled by the interceptor, RD_KAFKA_CONF_INVALID if the
5674 * property was handled by the interceptor but the value was invalid,
5675 * or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle
5676 * this property, in which case the property is passed on to the next
5677 * interceptor in the chain, finally ending up at the built-in
5678 * configuration handler.
5679 */
5680 typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)(
5681 rd_kafka_conf_t *conf,
5682 const char *name,
5683 const char *val,
5684 char *errstr,
5685 size_t errstr_size,
5686 void *ic_opaque);
5687
5688
5689 /**
5690 * @brief on_conf_dup() is called from rd_kafka_conf_dup() in the
5691 * order the interceptors were added and is used to let
5692 * an interceptor re-register its conf interceptors with a new
5693 * opaque value.
5694 * The on_conf_dup() method is called prior to the configuration from
5695 * \p old_conf being copied to \p new_conf.
5696 *
5697 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5698 * @param new_conf New configuration object.
5699 * @param old_conf Old configuration object to copy properties from.
5700 * @param filter_cnt Number of property names to filter in \p filter.
5701 * @param filter Property names to filter out (ignore) when setting up
5702 * \p new_conf.
5703 *
5704 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code
5705 * on failure (which is logged but otherwise ignored).
5706 *
5707 * @remark No on_conf_* interceptors are copied to the new configuration
5708 * object on rd_kafka_conf_dup().
5709 */
5710 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t)(
5711 rd_kafka_conf_t *new_conf,
5712 const rd_kafka_conf_t *old_conf,
5713 size_t filter_cnt,
5714 const char **filter,
5715 void *ic_opaque);
5716
5717
5718 /**
5719 * @brief on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the
5720 * order the interceptors were added.
5721 *
5722 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5723 */
5724 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t)(
5725 void *ic_opaque);
5726
5727
5728 /**
5729 * @brief on_new() is called from rd_kafka_new() prior to returning
5730 * the newly created client instance to the application.
5731 *
5732 * @param rk The client instance.
5733 * @param conf The client instance's final configuration.
5734 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5735 * @param errstr A human readable error string in case the interceptor fails.
5736 * @param errstr_size Maximum space (including \0) in \p errstr.
5737 *
5738 * @returns an error code on failure, the error is logged but otherwise ignored.
5739 *
5740 * @warning The \p rk client instance will not be fully set up when this
5741 * interceptor is called and the interceptor MUST NOT call any
5742 * other rk-specific APIs than rd_kafka_interceptor_add..().
5743 *
5744 */
5745 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t)(
5746 rd_kafka_t *rk,
5747 const rd_kafka_conf_t *conf,
5748 void *ic_opaque,
5749 char *errstr,
5750 size_t errstr_size);
5751
5752
5753 /**
5754 * @brief on_destroy() is called from rd_kafka_destroy(), or from
5755 * rd_kafka_new() if rd_kafka_new() fails during initialization.
5756 *
5757 * @param rk The client instance.
5758 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5759 */
5760 typedef rd_kafka_resp_err_t(
5761 rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
5762
5763
5764
5765 /**
5766 * @brief on_send() is called from rd_kafka_produce*() (et al.) prior to
5767 * the partitioner being called.
5768 *
5769 * @param rk The client instance.
5770 * @param rkmessage The message being produced. Immutable.
5771 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5772 *
5773 * @remark This interceptor is only used by producer instances.
5774 *
5775 * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
5776 * by the interceptor.
5777 *
5778 * @remark If the partitioner fails or an unknown partition was specified,
5779 * the on_acknowledgement() interceptor chain will be called from
5780 * within the rd_kafka_produce*() call to maintain send-acknowledgement
5781 * symmetry.
5782 *
5783 * @returns an error code on failure, the error is logged but otherwise ignored.
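 *
 * Illustrative interceptor sketch (hypothetical function name; a real
 * implementation would be registered from on_new() with
 * rd_kafka_interceptor_add_on_send()):
 * @code
 * static rd_kafka_resp_err_t my_on_send(rd_kafka_t *rk,
 *                                       rd_kafka_message_t *rkmessage,
 *                                       void *ic_opaque) {
 *         // Inspect (but never modify) the outgoing message here.
 *         return RD_KAFKA_RESP_ERR_NO_ERROR;
 * }
 * @endcode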
5784 */
5785 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t)(
5786 rd_kafka_t *rk,
5787 rd_kafka_message_t *rkmessage,
5788 void *ic_opaque);
5789
5790 /**
5791 * @brief on_acknowledgement() is called to inform interceptors that a message
5792 * was successfully delivered or permanently failed delivery.
5793 * The interceptor chain is called from internal librdkafka background
5794 * threads, or rd_kafka_produce*() if the partitioner failed.
5795 *
5796 * @param rk The client instance.
5797 * @param rkmessage The message being produced. Immutable.
5798 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5799 *
5800 * @remark This interceptor is only used by producer instances.
5801 *
5802 * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
5803 * by the interceptor.
5804 *
5805 * @warning The on_acknowledgement() method may be called from internal
5806 * librdkafka threads. An on_acknowledgement() interceptor MUST NOT
5807 * call any librdkafka APIs associated with the \p rk, or perform
5808 * any blocking or prolonged work.
5809 *
5810 * @returns an error code on failure, the error is logged but otherwise ignored.
5811 */
5812 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t)(
5813 rd_kafka_t *rk,
5814 rd_kafka_message_t *rkmessage,
5815 void *ic_opaque);
5816
5817
5818 /**
5819 * @brief on_consume() is called just prior to passing the message to the
5820 * application in rd_kafka_consumer_poll(), rd_kafka_consume*(),
5821 * the event interface, etc.
5822 *
5823 * @param rk The client instance.
5824 * @param rkmessage The message being consumed. Immutable.
5825 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5826 *
5827 * @remark This interceptor is only used by consumer instances.
5828 *
5829 * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
5830 * by the interceptor.
5831 *
5832 * @returns an error code on failure, the error is logged but otherwise ignored.
5833 */
5834 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)(
5835 rd_kafka_t *rk,
5836 rd_kafka_message_t *rkmessage,
5837 void *ic_opaque);
5838
5839 /**
5840 * @brief on_commit() is called on completed or failed offset commit.
5841 * It is called from internal librdkafka threads.
5842 *
5843 * @param rk The client instance.
5844 * @param offsets List of topic+partition+offset+error that were committed.
5845 * The error message of each partition should be checked for
5846 * error.
5847 * @param err The commit error, if any.
5848 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5849 *
5850 * @remark This interceptor is only used by consumer instances.
5851 *
5852 * @warning The on_commit() interceptor is called from internal
5853 * librdkafka threads. An on_commit() interceptor MUST NOT
5854 * call any librdkafka APIs associated with the \p rk, or perform
5855 * any blocking or prolonged work.
5856 *
5857 *
5858 * @returns an error code on failure, the error is logged but otherwise ignored.
5859 */
5860 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)(
5861 rd_kafka_t *rk,
5862 const rd_kafka_topic_partition_list_t *offsets,
5863 rd_kafka_resp_err_t err,
5864 void *ic_opaque);
5865
5866
5867 /**
5868 * @brief on_request_sent() is called when a request has been fully written
5869 * to a broker TCP connection's socket.
5870 *
5871 * @param rk The client instance.
5872 * @param sockfd Socket file descriptor.
5873 * @param brokername Name of the broker the request is being sent to.
5874 * @param brokerid Id of the broker the request is being sent to.
5875 * @param ApiKey Kafka protocol request type.
5876 * @param ApiVersion Kafka protocol request type version.
5877 * @param CorrId Kafka protocol request correlation id.
5878 * @param size Size of request.
5879 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5880 *
5881 * @warning The on_request_sent() interceptor is called from internal
5882 * librdkafka broker threads. An on_request_sent() interceptor MUST NOT
5883 * call any librdkafka APIs associated with the \p rk, or perform
5884 * any blocking or prolonged work.
5885 *
5886 * @returns an error code on failure, the error is logged but otherwise ignored.
5887 */
5888 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)(
5889 rd_kafka_t *rk,
5890 int sockfd,
5891 const char *brokername,
5892 int32_t brokerid,
5893 int16_t ApiKey,
5894 int16_t ApiVersion,
5895 int32_t CorrId,
5896 size_t size,
5897 void *ic_opaque);
5898
5899
5900 /**
5901 * @brief on_response_received() is called when a protocol response has been
5902 * fully received from a broker TCP connection socket but before the
5903 * response payload is parsed.
5904 *
5905 * @param rk The client instance.
5906 * @param sockfd Socket file descriptor (always -1).
5907 * @param brokername Broker response was received from, possibly empty string
5908 * on error.
5909 * @param brokerid Broker response was received from.
5910 * @param ApiKey Kafka protocol request type or -1 on error.
5911 * @param ApiVersion Kafka protocol request type version or -1 on error.
5912 * @param CorrId Kafka protocol request correlation id, possibly -1 on error.
5913 * @param size Size of response, possibly 0 on error.
5914 * @param rtt Request round-trip-time in microseconds, possibly -1 on error.
5915 * @param err Receive error.
5916 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5917 *
5918 * @warning The on_response_received() interceptor is called from internal
5919 * librdkafka broker threads. An on_response_received() interceptor
5920 * MUST NOT call any librdkafka APIs associated with the \p rk, or
5921 * perform any blocking or prolonged work.
5922 *
5923 * @returns an error code on failure, the error is logged but otherwise ignored.
5924 */
5925 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t)(
5926 rd_kafka_t *rk,
5927 int sockfd,
5928 const char *brokername,
5929 int32_t brokerid,
5930 int16_t ApiKey,
5931 int16_t ApiVersion,
5932 int32_t CorrId,
5933 size_t size,
5934 int64_t rtt,
5935 rd_kafka_resp_err_t err,
5936 void *ic_opaque);
5937
5938
5939 /**
5940 * @brief on_thread_start() is called from a newly created librdkafka-managed
5941 * thread.
5942 *
5943 * @param rk The client instance.
5944 * @param thread_type Thread type.
5945 * @param thread_name Human-readable thread name, may not be unique.
5946 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5947 *
5948 * @warning The on_thread_start() interceptor is called from internal
5949 * librdkafka threads. An on_thread_start() interceptor MUST NOT
5950 * call any librdkafka APIs associated with the \p rk, or perform
5951 * any blocking or prolonged work.
5952 *
5953 * @returns an error code on failure, the error is logged but otherwise ignored.
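 *
 * Illustrative sketch (hypothetical function name):
 * @code
 * static rd_kafka_resp_err_t
 * my_on_thread_start(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type,
 *                    const char *thread_name, void *ic_opaque) {
 *         // E.g., apply a thread naming or priority policy here,
 *         // without calling back into librdkafka.
 *         return RD_KAFKA_RESP_ERR_NO_ERROR;
 * }
 * @endcode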
5954 */
5955 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t)(
5956 rd_kafka_t *rk,
5957 rd_kafka_thread_type_t thread_type,
5958 const char *thread_name,
5959 void *ic_opaque);
5960
5961
5962 /**
5963 * @brief on_thread_exit() is called just prior to a librdkafka-managed
5964 * thread exiting from the exiting thread itself.
5965 *
5966 * @param rk The client instance.
5967 * @param thread_type Thread type.
5968 * @param thread_name Human-readable thread name, may not be unique.
5969 * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
5970 *
5971 * @remark Depending on the thread type, librdkafka may execute additional
5972 * code on the thread after on_thread_exit() returns.
5973 *
5974 * @warning The on_thread_exit() interceptor is called from internal
5975 * librdkafka threads. An on_thread_exit() interceptor MUST NOT
5976 * call any librdkafka APIs associated with the \p rk, or perform
5977 * any blocking or prolonged work.
5978 *
5979 * @returns an error code on failure, the error is logged but otherwise ignored.
5980 */
5981 typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)(
5982 rd_kafka_t *rk,
5983 rd_kafka_thread_type_t thread_type,
5984 const char *thread_name,
5985 void *ic_opaque);
5986
5987
5988
5989 /**
5990 * @brief Append an on_conf_set() interceptor.
5991 *
5992 * @param conf Configuration object.
5993 * @param ic_name Interceptor name, used in logging.
5994 * @param on_conf_set Function pointer.
5995 * @param ic_opaque Opaque value that will be passed to the function.
5996 *
5997 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
5998 * if an existing interceptor with the same \p ic_name and function
5999 * has already been added to \p conf.
6000 */
6001 RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(
6002 rd_kafka_conf_t *conf,
6003 const char *ic_name,
6004 rd_kafka_interceptor_f_on_conf_set_t *on_conf_set,
6005 void *ic_opaque);
6006
6007
6008 /**
6009 * @brief Append an on_conf_dup() interceptor.
6010 *
6011 * @param conf Configuration object.
6012 * @param ic_name Interceptor name, used in logging.
6013 * @param on_conf_dup Function pointer.
6014 * @param ic_opaque Opaque value that will be passed to the function.
6015 *
6016 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6017 * if an existing interceptor with the same \p ic_name and function
6018 * has already been added to \p conf.
6019 */
6020 RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(
6021 rd_kafka_conf_t *conf,
6022 const char *ic_name,
6023 rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup,
6024 void *ic_opaque);
6025
6026 /**
6027 * @brief Append an on_conf_destroy() interceptor.
6028 *
6029 * @param conf Configuration object.
6030 * @param ic_name Interceptor name, used in logging.
6031 * @param on_conf_destroy Function pointer.
6032 * @param ic_opaque Opaque value that will be passed to the function.
6033 *
6034 * @returns RD_KAFKA_RESP_ERR_NO_ERROR
6035 *
6036 * @remark Multiple on_conf_destroy() interceptors are allowed to be added
6037 * to the same configuration object.
6038 */
6039 RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(
6040 rd_kafka_conf_t *conf,
6041 const char *ic_name,
6042 rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy,
6043 void *ic_opaque);
6044
6045
6046 /**
6047 * @brief Append an on_new() interceptor.
6048 *
6049 * @param conf Configuration object.
6050 * @param ic_name Interceptor name, used in logging.
6051 * @param on_new Function pointer.
6052 * @param ic_opaque Opaque value that will be passed to the function.
6053 *
6054 * @remark Since the on_new() interceptor is added to the configuration object
6055 * it may be copied by rd_kafka_conf_dup().
6056 * An interceptor implementation must thus be able to handle
6057 * the same (interceptor, ic_opaque) tuple being used by multiple
6058 * client instances.
6059 *
6060 * @remark An interceptor plugin should check the return value to make sure it
6061 * has not already been added.
6062 *
6063 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6064 * if an existing interceptor with the same \p ic_name and function
6065 * has already been added to \p conf.
6066 */
6067 RD_EXPORT rd_kafka_resp_err_t
6068 rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf,
6069 const char *ic_name,
6070 rd_kafka_interceptor_f_on_new_t *on_new,
6071 void *ic_opaque);
6072
6073
6074
6075 /**
6076 * @brief Append an on_destroy() interceptor.
6077 *
6078 * @param rk Client instance.
6079 * @param ic_name Interceptor name, used in logging.
6080 * @param on_destroy Function pointer.
6081 * @param ic_opaque Opaque value that will be passed to the function.
6082 *
6083 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6084 * if an existing interceptor with the same \p ic_name and function
6085 * has already been added to \p rk.
6086 */
6087 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(
6088 rd_kafka_t *rk,
6089 const char *ic_name,
6090 rd_kafka_interceptor_f_on_destroy_t *on_destroy,
6091 void *ic_opaque);
6092
6093
6094 /**
6095 * @brief Append an on_send() interceptor.
6096 *
6097 * @param rk Client instance.
6098 * @param ic_name Interceptor name, used in logging.
6099 * @param on_send Function pointer.
6100 * @param ic_opaque Opaque value that will be passed to the function.
6101 *
6102 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6103 * if an existing interceptor with the same \p ic_name and function
6104 * has already been added to \p rk.
6105 */
6106 RD_EXPORT rd_kafka_resp_err_t
6107 rd_kafka_interceptor_add_on_send(rd_kafka_t *rk,
6108 const char *ic_name,
6109 rd_kafka_interceptor_f_on_send_t *on_send,
6110 void *ic_opaque);
6111
6112 /**
6113 * @brief Append an on_acknowledgement() interceptor.
6114 *
6115 * @param rk Client instance.
6116 * @param ic_name Interceptor name, used in logging.
6117 * @param on_acknowledgement Function pointer.
6118 * @param ic_opaque Opaque value that will be passed to the function.
6119 *
6120 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6121 * if an existing interceptor with the same \p ic_name and function
6122 * has already been added to \p rk.
6123 */
6124 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(
6125 rd_kafka_t *rk,
6126 const char *ic_name,
6127 rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement,
6128 void *ic_opaque);
6129
6130
6131 /**
6132 * @brief Append an on_consume() interceptor.
6133 *
6134 * @param rk Client instance.
6135 * @param ic_name Interceptor name, used in logging.
6136 * @param on_consume Function pointer.
6137 * @param ic_opaque Opaque value that will be passed to the function.
6138 *
6139 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6140 * if an existing interceptor with the same \p ic_name and function
6141 * has already been added to \p rk.
6142 */
6143 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(
6144 rd_kafka_t *rk,
6145 const char *ic_name,
6146 rd_kafka_interceptor_f_on_consume_t *on_consume,
6147 void *ic_opaque);
6148
6149
6150 /**
6151 * @brief Append an on_commit() interceptor.
6152 *
6153 * @param rk Client instance.
6154 * @param ic_name Interceptor name, used in logging.
6155 * @param on_commit Function pointer.
6156 * @param ic_opaque Opaque value that will be passed to the function.
6157 *
6158 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6159 * if an existing interceptor with the same \p ic_name and function
6160 * has already been added to \p rk.
6161 */
6162 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(
6163 rd_kafka_t *rk,
6164 const char *ic_name,
6165 rd_kafka_interceptor_f_on_commit_t *on_commit,
6166 void *ic_opaque);
6167
6168
6169 /**
6170 * @brief Append an on_request_sent() interceptor.
6171 *
6172 * @param rk Client instance.
6173 * @param ic_name Interceptor name, used in logging.
6174 * @param on_request_sent Function pointer.
6175 * @param ic_opaque Opaque value that will be passed to the function.
6176 *
6177 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6178 * if an existing interceptor with the same \p ic_name and function
6179 * has already been added to \p rk.
6180 */
6181 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(
6182 rd_kafka_t *rk,
6183 const char *ic_name,
6184 rd_kafka_interceptor_f_on_request_sent_t *on_request_sent,
6185 void *ic_opaque);
6186
6187
6188 /**
6189 * @brief Append an on_response_received() interceptor.
6190 *
6191 * @param rk Client instance.
6192 * @param ic_name Interceptor name, used in logging.
6193 * @param on_response_received Function pointer.
6194 * @param ic_opaque Opaque value that will be passed to the function.
6195 *
6196 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6197 * if an existing interceptor with the same \p ic_name and function
6198 * has already been added to \p rk.
6199 */
6200 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(
6201 rd_kafka_t *rk,
6202 const char *ic_name,
6203 rd_kafka_interceptor_f_on_response_received_t *on_response_received,
6204 void *ic_opaque);
6205
6206
6207 /**
6208 * @brief Append an on_thread_start() interceptor.
6209 *
6210 * @param rk Client instance.
6211 * @param ic_name Interceptor name, used in logging.
6212 * @param on_thread_start Function pointer.
6213 * @param ic_opaque Opaque value that will be passed to the function.
6214 *
6215 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6216 * if an existing interceptor with the same \p ic_name and function
6217 * has already been added to \p rk.
6218 */
6219 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(
6220 rd_kafka_t *rk,
6221 const char *ic_name,
6222 rd_kafka_interceptor_f_on_thread_start_t *on_thread_start,
6223 void *ic_opaque);
6224
6225
6226 /**
6227 * @brief Append an on_thread_exit() interceptor.
6228 *
6229 * @param rk Client instance.
6230 * @param ic_name Interceptor name, used in logging.
6231 * @param on_thread_exit Function pointer.
6232 * @param ic_opaque Opaque value that will be passed to the function.
6233 *
6234 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
6235 * if an existing interceptor with the same \p ic_name and function
6236 * has already been added to \p rk.
6237 */
6238 RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(
6239 rd_kafka_t *rk,
6240 const char *ic_name,
6241 rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit,
6242 void *ic_opaque);
6243
6244
6245
6246 /**@}*/
6247
6248
6249
6250 /**
6251 * @name Auxiliary types
6252 *
6253 * @{
6254 */
6255
6256
6257
6258 /**
6259 * @brief Topic result provides per-topic operation result information.
6260 *
6261 */
6262
6263 /**
6264 * @returns the error code for the given topic result.
6265 */
6266 RD_EXPORT rd_kafka_resp_err_t
6267 rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
6268
6269 /**
6270 * @returns the human readable error string for the given topic result,
6271 * or NULL if there was no error.
6272 *
6273 * @remark lifetime of the returned string is the same as the \p topicres.
6274 */
6275 RD_EXPORT const char *
6276 rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
6277
6278 /**
6279 * @returns the name of the topic for the given topic result.
6280 * @remark lifetime of the returned string is the same as the \p topicres.
6281 *
6282 */
6283 RD_EXPORT const char *
6284 rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
6285
6286 /**
6287 * @brief Group result provides per-group operation result information.
6288 *
6289 */
6290
6291 /**
6292 * @returns the error for the given group result, or NULL on success.
6293 * @remark lifetime of the returned error is the same as the \p groupres.
6294 */
6295 RD_EXPORT const rd_kafka_error_t *
6296 rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
6297
6298 /**
6299 * @returns the name of the group for the given group result.
6300 * @remark lifetime of the returned string is the same as the \p groupres.
6301 *
6302 */
6303 RD_EXPORT const char *
6304 rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
6305
6306 /**
6307 * @returns the partitions/offsets for the given group result, if applicable
6308 * to the request type, else NULL.
6309 * @remark lifetime of the returned list is the same as the \p groupres.
6310 */
6311 RD_EXPORT const rd_kafka_topic_partition_list_t *
6312 rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
6313
6314
6315 /**@}*/
6316
6317
6318 /**
6319 * @name Admin API
6320 * @{
6321 *
6322 * @brief The Admin API enables applications to perform administrative
6323 * Apache Kafka tasks, such as creating and deleting topics,
6324 * altering and reading broker configuration, etc.
6325 *
6326 * The Admin API is asynchronous and makes use of librdkafka's standard
6327 * \c rd_kafka_queue_t queues to propagate the result of an admin operation
6328 * back to the application.
6329 * The supplied queue may be any queue, such as a temporary single-call queue,
6330 * a shared queue used for multiple requests, or even the main queue or
6331 * consumer queues.
6332 *
6333 * Use \c rd_kafka_queue_poll() to collect the result of an admin operation
6334 * from the queue of your choice, then extract the admin API-specific result
6335 * type by using the corresponding \c rd_kafka_event_CreateTopics_result,
6336 * \c rd_kafka_event_DescribeConfigs_result, etc, methods.
6337 * Use the getter methods on the \c .._result_t type to extract response
6338 * information and finally destroy the result and event by calling
6339 * \c rd_kafka_event_destroy().
6340 *
6341 * Use rd_kafka_event_error() and rd_kafka_event_error_string() to acquire
6342 * the request-level error/success for an Admin API request.
6343 * Even if the returned value is \c RD_KAFKA_RESP_ERR_NO_ERROR there
6344 * may be individual objects (topics, resources, etc) that have failed.
6345 * Extract per-object error information with the corresponding
6346 * \c rd_kafka_..._result_topics|resources|..() to check per-object errors.
6347 *
6348 * Locally triggered errors:
6349 * - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not
6350 * become available in the time allowed by
6351 * rd_kafka_AdminOptions_set_request_timeout().
6352 */
6353
6354 /**
6355 * @enum rd_kafka_admin_op_t
6356 *
6357 * @brief Admin operation enum name for use with rd_kafka_AdminOptions_new()
6358 *
6359 * @sa rd_kafka_AdminOptions_new()
6360 */
6361 typedef enum rd_kafka_admin_op_t {
6362 RD_KAFKA_ADMIN_OP_ANY = 0, /**< Default value */
6363 RD_KAFKA_ADMIN_OP_CREATETOPICS, /**< CreateTopics */
6364 RD_KAFKA_ADMIN_OP_DELETETOPICS, /**< DeleteTopics */
6365 RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, /**< CreatePartitions */
6366 RD_KAFKA_ADMIN_OP_ALTERCONFIGS, /**< AlterConfigs */
6367 RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, /**< DescribeConfigs */
6368 RD_KAFKA_ADMIN_OP_DELETERECORDS, /**< DeleteRecords */
6369 RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */
6370 /** DeleteConsumerGroupOffsets */
6371 RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
6372 RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */
6373 RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */
6374 RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */
6375 RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */
6376 } rd_kafka_admin_op_t;
6377
6378 /**
6379 * @brief AdminOptions provides a generic mechanism for setting optional
6380 * parameters for the Admin API requests.
6381 *
6382 * @remark Since AdminOptions is decoupled from the actual request type
6383 * there is no enforcement to prevent setting unrelated properties,
6384 * e.g. setting validate_only on a DescribeConfigs request is allowed
6385 * but is silently ignored by DescribeConfigs.
6386 * Future versions may introduce such enforcement.
6387 */
6388
6389
6390 typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
6391
6392 /**
6393 * @brief Create a new AdminOptions object.
6394 *
6395 * The options object is not modified by the Admin API request APIs
6396 * (e.g. CreateTopics) and may be reused for multiple calls.
6397 *
6398 * @param rk Client instance.
6399 * @param for_api Specifies what Admin API this AdminOptions object will be used
6400 * for, which will enforce what AdminOptions_set_..() calls may
6401 * be used based on the API, causing unsupported set..() calls
6402 * to fail.
6403 * Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement
6404 * allowing any option to be set, even if the option
6405 * is not used in a future call to an Admin API method.
6406 *
6407 * @returns a new AdminOptions object (which must be freed with
6408 * rd_kafka_AdminOptions_destroy()), or NULL if \p for_api was set to
6409 * an unknown API op type.
6410 */
6411 RD_EXPORT rd_kafka_AdminOptions_t *
6412 rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
6413
6414
6415 /**
6416 * @brief Destroy an AdminOptions object.
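 *
 * Illustrative create/use/destroy sketch (assumes a client handle \c rk):
 * @code
 * rd_kafka_AdminOptions_t *options =
 *         rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
 * // ... AdminOptions_set_..() calls and the CreateTopics request ...
 * rd_kafka_AdminOptions_destroy(options);
 * @endcode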
6417 */
6418 RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
6419
6420
6421 /**
6422 * @brief Sets the overall request timeout, including broker lookup,
6423 * request transmission, operation time on broker, and response.
6424 *
6425 * @param options Admin options.
6426 * @param timeout_ms Timeout in milliseconds, use -1 for indefinite timeout.
6427 * Defaults to `socket.timeout.ms`.
6428 * @param errstr A human readable error string (nul-terminated) is written to
6429 * this location that must be of at least \p errstr_size bytes.
6430 * The \p errstr is only written in case of error.
6431 * @param errstr_size Writable size in \p errstr.
6432 *
6433 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
6434 * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
6435 * case an error string will be written to \p errstr.
6436 *
6437 * @remark This option is valid for all Admin API requests.
6438 */
6439 RD_EXPORT rd_kafka_resp_err_t
6440 rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options,
6441 int timeout_ms,
6442 char *errstr,
6443 size_t errstr_size);
6444
6445
6446 /**
6447 * @brief Sets the broker's operation timeout, such as the timeout for
6448 * CreateTopics to complete the creation of topics on the controller
6449 * before returning a result to the application.
6450 *
6451 * CreateTopics: values <= 0 will return immediately after triggering topic
6452 * creation, while > 0 will wait this long for topic creation to propagate
6453 * in the cluster. Default: 60 seconds.
6454 *
6455 * DeleteTopics: same semantics as CreateTopics.
6456 * CreatePartitions: same semantics as CreateTopics.
6457 *
6458 * @param options Admin options.
6459 * @param timeout_ms Timeout in milliseconds.
6460 * @param errstr A human readable error string (nul-terminated) is written to
6461 * this location that must be of at least \p errstr_size bytes.
6462 * The \p errstr is only written in case of error.
6463 * @param errstr_size Writable size in \p errstr.
6464 *
6465 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
6466 * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
6467 * case an error string will be written to \p errstr.
6468 *
6469 * @remark This option is valid for CreateTopics, DeleteTopics,
6470 * CreatePartitions, and DeleteRecords.
6471 */
6472 RD_EXPORT rd_kafka_resp_err_t
6473 rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options,
6474 int timeout_ms,
6475 char *errstr,
6476 size_t errstr_size);
6477
6478
6479 /**
6480 * @brief Tell broker to only validate the request, without performing
6481 * the requested operation (create topics, etc).
6482 *
6483 * @param options Admin options.
6484 * @param true_or_false Defaults to false.
6485 * @param errstr A human readable error string (nul-terminated) is written to
6486 * this location that must be of at least \p errstr_size bytes.
6487 * The \p errstr is only written in case of error.
6488 * @param errstr_size Writable size in \p errstr.
6489 *
6490 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
6491 * error code on failure in which case an error string will
6492 * be written to \p errstr.
6493 *
6494 * @remark This option is valid for CreateTopics,
6495 * CreatePartitions, AlterConfigs.
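 *
 * For example, a minimal sketch (assuming \c rk is an existing client
 * instance):
 * @code
 * char errstr[512];
 * rd_kafka_AdminOptions_t *options =
 *     rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
 *
 * if (rd_kafka_AdminOptions_set_validate_only(options, 1, errstr,
 *                                             sizeof(errstr)))
 *     fprintf(stderr, "Failed to set validate_only: %s\n", errstr);
 * @endcode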
6496 */
6497 RD_EXPORT rd_kafka_resp_err_t
6498 rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options,
6499 int true_or_false,
6500 char *errstr,
6501 size_t errstr_size);
6502
6503
6504 /**
6505 * @brief Override what broker the Admin request will be sent to.
6506 *
6507 * By default, Admin requests are sent to the controller broker, with
6508 * the following exceptions:
6509 * - AlterConfigs with a BROKER resource are sent to the broker id set
6510 * as the resource name.
6511 * - DescribeConfigs with a BROKER resource are sent to the broker id set
6512 * as the resource name.
6513 *
6514 * @param options Admin options.
6515 * @param broker_id The broker to send the request to.
6516 * @param errstr A human readable error string (nul-terminated) is written to
6517 * this location that must be of at least \p errstr_size bytes.
6518 * The \p errstr is only written in case of error.
6519 * @param errstr_size Writable size in \p errstr.
6520 *
6521 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
6522 * error code on failure in which case an error string will
6523 * be written to \p errstr.
6524 *
6525 * @remark This API should typically not be used, but serves as a workaround
6526 * if new resource types are added to the broker that the client
6527 * does not know where to send.
6528 */
6529 RD_EXPORT rd_kafka_resp_err_t
6530 rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options,
6531 int32_t broker_id,
6532 char *errstr,
6533 size_t errstr_size);
6534
6535
6536
6537 /**
6538 * @brief Set application opaque value that can be extracted from the
6539 * result event using rd_kafka_event_opaque()
6540 */
6541 RD_EXPORT void
6542 rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options,
6543 void *ev_opaque);
6544
6545
6546
6547 /*
6548 * CreateTopics - create topics in cluster.
6549 *
6550 */
6551
6552
6553 /*! Defines a new topic to be created. */
6554 typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
6555
6556 /**
6557 * @brief Create a new NewTopic object. This object is later passed to
6558 * rd_kafka_CreateTopics().
6559 *
6560 * @param topic Topic name to create.
6561 * @param num_partitions Number of partitions in topic, or -1 to use the
6562 * broker's default partition count (>= 2.4.0).
6563 * @param replication_factor Default replication factor for the topic's
6564 * partitions, or -1 to use the broker's default
6565 * replication factor (>= 2.4.0) or if
6566 * set_replica_assignment() will be used.
6567 * @param errstr A human readable error string (nul-terminated) is written to
6568 * this location that must be of at least \p errstr_size bytes.
6569 * The \p errstr is only written in case of error.
6570 * @param errstr_size Writable size in \p errstr.
6571 *
6572 *
6573 * @returns a new allocated NewTopic object, or NULL if the input parameters
6574 * are invalid.
6575 * Use rd_kafka_NewTopic_destroy() to free object when done.
6576 */
6577 RD_EXPORT rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic,
6578 int num_partitions,
6579 int replication_factor,
6580 char *errstr,
6581 size_t errstr_size);
6582
6583 /**
6584 * @brief Destroy and free a NewTopic object previously created with
6585 * rd_kafka_NewTopic_new()
6586 */
6587 RD_EXPORT void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
6588
6589
6590 /**
6591 * @brief Helper function to destroy all NewTopic objects in the \p new_topics
6592 * array (of \p new_topic_cnt elements).
6593 * The array itself is not freed.
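 *
 * For example, a hypothetical sketch of creating and disposing of a
 * one-element array:
 * @code
 * char errstr[512];
 * rd_kafka_NewTopic_t *new_topics[1];
 *
 * new_topics[0] = rd_kafka_NewTopic_new("mytopic", 3, 1,
 *                                       errstr, sizeof(errstr));
 * // ... pass to rd_kafka_CreateTopics() and await the result event ...
 * rd_kafka_NewTopic_destroy_array(new_topics, 1);
 * @endcode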
6594 */ 6595 RD_EXPORT void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, 6596 size_t new_topic_cnt); 6597 6598 6599 /** 6600 * @brief Set the replica (broker) assignment for \p partition to the 6601 * replica set in \p broker_ids (of \p broker_id_cnt elements). 6602 * 6603 * @remark When this method is used, rd_kafka_NewTopic_new() must have 6604 * been called with a \c replication_factor of -1. 6605 * 6606 * @remark An application must either set the replica assignment for 6607 * all new partitions, or none. 6608 * 6609 * @remark If called, this function must be called consecutively for each 6610 * partition, starting at 0. 6611 * 6612 * @remark Use rd_kafka_metadata() to retrieve the list of brokers 6613 * in the cluster. 6614 * 6615 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code 6616 * if the arguments were invalid. 6617 * 6618 * @sa rd_kafka_AdminOptions_set_validate_only() 6619 */ 6620 RD_EXPORT rd_kafka_resp_err_t 6621 rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, 6622 int32_t partition, 6623 int32_t *broker_ids, 6624 size_t broker_id_cnt, 6625 char *errstr, 6626 size_t errstr_size); 6627 6628 /** 6629 * @brief Set (broker-side) topic configuration name/value pair. 6630 * 6631 * @remark The name and value are not validated by the client, the validation 6632 * takes place on the broker. 6633 * 6634 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code 6635 * if the arguments were invalid. 6636 * 6637 * @sa rd_kafka_AdminOptions_set_validate_only() 6638 * @sa http://kafka.apache.org/documentation.html#topicconfigs 6639 */ 6640 RD_EXPORT rd_kafka_resp_err_t 6641 rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, 6642 const char *name, 6643 const char *value); 6644 6645 6646 /** 6647 * @brief Create topics in cluster as specified by the \p new_topics 6648 * array of size \p new_topic_cnt elements. 6649 * 6650 * @param rk Client instance. 6651 * @param new_topics Array of new topics to create. 6652 * @param new_topic_cnt Number of elements in \p new_topics array. 6653 * @param options Optional admin options, or NULL for defaults. 6654 * @param rkqu Queue to emit result on. 6655 * 6656 * Supported admin options: 6657 * - rd_kafka_AdminOptions_set_validate_only() - default false 6658 * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds 6659 * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms 6660 * 6661 * @remark The result event type emitted on the supplied queue is of type 6662 * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT 6663 */ 6664 RD_EXPORT void rd_kafka_CreateTopics(rd_kafka_t *rk, 6665 rd_kafka_NewTopic_t **new_topics, 6666 size_t new_topic_cnt, 6667 const rd_kafka_AdminOptions_t *options, 6668 rd_kafka_queue_t *rkqu); 6669 6670 6671 /* 6672 * CreateTopics result type and methods 6673 */ 6674 6675 /** 6676 * @brief Get an array of topic results from a CreateTopics result. 6677 * 6678 * The returned \p topics life-time is the same as the \p result object. 6679 * 6680 * @param result Result to get topics from. 6681 * @param cntp Updated to the number of elements in the array. 6682 */ 6683 RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics( 6684 const rd_kafka_CreateTopics_result_t *result, 6685 size_t *cntp); 6686 6687 6688 6689 /* 6690 * DeleteTopics - delete topics from cluster 6691 * 6692 */ 6693 6694 /*! Represents a topic to be deleted. 
*/
6695 typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
6696
6697 /**
6698 * @brief Create a new DeleteTopic object. This object is later passed to
6699 * rd_kafka_DeleteTopics().
6700 *
6701 * @param topic Topic name to delete.
6702 *
6703 * @returns a new allocated DeleteTopic object.
6704 * Use rd_kafka_DeleteTopic_destroy() to free object when done.
6705 */
6706 RD_EXPORT rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
6707
6708 /**
6709 * @brief Destroy and free a DeleteTopic object previously created with
6710 * rd_kafka_DeleteTopic_new()
6711 */
6712 RD_EXPORT void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
6713
6714 /**
6715 * @brief Helper function to destroy all DeleteTopic objects in
6716 * the \p del_topics array (of \p del_topic_cnt elements).
6717 * The array itself is not freed.
6718 */
6719 RD_EXPORT void
6720 rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics,
6721 size_t del_topic_cnt);
6722
6723 /**
6724 * @brief Delete topics from cluster as specified by the \p del_topics
6725 * array of size \p del_topic_cnt elements.
6726 *
6727 * @param rk Client instance.
6728 * @param del_topics Array of topics to delete.
6729 * @param del_topic_cnt Number of elements in \p del_topics array.
6730 * @param options Optional admin options, or NULL for defaults.
6731 * @param rkqu Queue to emit result on.
6732 *
6733 * @remark The result event type emitted on the supplied queue is of type
6734 * \c RD_KAFKA_EVENT_DELETETOPICS_RESULT
6735 */
6736 RD_EXPORT
6737 void rd_kafka_DeleteTopics(rd_kafka_t *rk,
6738 rd_kafka_DeleteTopic_t **del_topics,
6739 size_t del_topic_cnt,
6740 const rd_kafka_AdminOptions_t *options,
6741 rd_kafka_queue_t *rkqu);
6742
6743
6744
6745 /*
6746 * DeleteTopics result type and methods
6747 */
6748
6749 /**
6750 * @brief Get an array of topic results from a DeleteTopics result.
6751 *
6752 * The returned \p topics life-time is the same as the \p result object.
6753 *
6754 * @param result Result to get topic results from.
6755 * @param cntp is updated to the number of elements in the array.
6756 */
6757 RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(
6758 const rd_kafka_DeleteTopics_result_t *result,
6759 size_t *cntp);
6760
6761
6762
6763 /*
6764 * CreatePartitions - add partitions to topic.
6765 *
6766 */
6767
6768 /*! Defines a new partition to be created. */
6769 typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
6770
6771 /**
6772 * @brief Create a new NewPartitions. This object is later passed to
6773 * rd_kafka_CreatePartitions() to increase the number of partitions
6774 * to \p new_total_cnt for an existing topic.
6775 *
6776 * @param topic Topic name to create more partitions for.
6777 * @param new_total_cnt Increase the topic's partition count to this value.
6778 * @param errstr A human readable error string (nul-terminated) is written to
6779 * this location that must be of at least \p errstr_size bytes.
6780 * The \p errstr is only written in case of error.
6781 * @param errstr_size Writable size in \p errstr.
6782 *
6783 * @returns a new allocated NewPartitions object, or NULL if the
6784 * input parameters are invalid.
6785 * Use rd_kafka_NewPartitions_destroy() to free object when done.
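 *
 * For example, growing \c "mytopic" to 10 partitions (a hypothetical
 * sketch):
 * @code
 * char errstr[512];
 * rd_kafka_NewPartitions_t *new_parts =
 *     rd_kafka_NewPartitions_new("mytopic", 10, errstr, sizeof(errstr));
 *
 * if (!new_parts)
 *     fprintf(stderr, "NewPartitions_new failed: %s\n", errstr);
 * @endcode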
6786 */
6787 RD_EXPORT rd_kafka_NewPartitions_t *
6788 rd_kafka_NewPartitions_new(const char *topic,
6789 size_t new_total_cnt,
6790 char *errstr,
6791 size_t errstr_size);
6792
6793 /**
6794 * @brief Destroy and free a NewPartitions object previously created with
6795 * rd_kafka_NewPartitions_new()
6796 */
6797 RD_EXPORT void
6798 rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
6799
6800 /**
6801 * @brief Helper function to destroy all NewPartitions objects in the
6802 * \p new_parts array (of \p new_parts_cnt elements).
6803 * The array itself is not freed.
6804 */
6805 RD_EXPORT void
6806 rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts,
6807 size_t new_parts_cnt);
6808
6809 /**
6810 * @brief Set the replica (broker id) assignment for \p new_partition_idx to the
6811 * replica set in \p broker_ids (of \p broker_id_cnt elements).
6812 *
6813 * @remark An application must either set the replica assignment for
6814 * all new partitions, or none.
6815 *
6816 * @remark If called, this function must be called consecutively for each
6817 * new partition being created,
6818 * where \p new_partition_idx 0 is the first new partition,
6819 * 1 is the second, and so on.
6820 *
6821 * @remark \p broker_id_cnt should match the topic's replication factor.
6822 *
6823 * @remark Use rd_kafka_metadata() to retrieve the list of brokers
6824 * in the cluster.
6825 *
6826 * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
6827 * if the arguments were invalid.
6828 *
6829 * @sa rd_kafka_AdminOptions_set_validate_only()
6830 */
6831 RD_EXPORT rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(
6832 rd_kafka_NewPartitions_t *new_parts,
6833 int32_t new_partition_idx,
6834 int32_t *broker_ids,
6835 size_t broker_id_cnt,
6836 char *errstr,
6837 size_t errstr_size);
6838
6839
6840 /**
6841 * @brief Create additional partitions for the given topics, as specified
6842 * by the \p new_parts array of size \p new_parts_cnt elements.
6843 *
6844 * @param rk Client instance.
6845 * @param new_parts Array of topics for which new partitions are to be created.
6846 * @param new_parts_cnt Number of elements in \p new_parts array.
6847 * @param options Optional admin options, or NULL for defaults.
6848 * @param rkqu Queue to emit result on.
6849 *
6850 * Supported admin options:
6851 * - rd_kafka_AdminOptions_set_validate_only() - default false
6852 * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
6853 * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
6854 *
6855 * @remark The result event type emitted on the supplied queue is of type
6856 * \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
6857 */
6858 RD_EXPORT void rd_kafka_CreatePartitions(rd_kafka_t *rk,
6859 rd_kafka_NewPartitions_t **new_parts,
6860 size_t new_parts_cnt,
6861 const rd_kafka_AdminOptions_t *options,
6862 rd_kafka_queue_t *rkqu);
6863
6864
6865
6866 /*
6867 * CreatePartitions result type and methods
6868 */
6869
6870 /**
6871 * @brief Get an array of topic results from a CreatePartitions result.
6872 *
6873 * The returned \p topics life-time is the same as the \p result object.
6874 *
6875 * @param result Result to get topic results from.
6876 * @param cntp is updated to the number of elements in the array.
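 *
 * For example, a hypothetical sketch of checking per-topic errors:
 * @code
 * size_t cnt, i;
 * const rd_kafka_topic_result_t **topics =
 *     rd_kafka_CreatePartitions_result_topics(result, &cnt);
 *
 * for (i = 0; i < cnt; i++)
 *     if (rd_kafka_topic_result_error(topics[i]))
 *         fprintf(stderr, "%s failed: %s\n",
 *                 rd_kafka_topic_result_name(topics[i]),
 *                 rd_kafka_topic_result_error_string(topics[i]));
 * @endcode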
6877 */
6878 RD_EXPORT const rd_kafka_topic_result_t **
6879 rd_kafka_CreatePartitions_result_topics(
6880 const rd_kafka_CreatePartitions_result_t *result,
6881 size_t *cntp);
6882
6883
6884
6885 /*
6886 * Cluster, broker, topic configuration entries, sources, etc.
6887 *
6888 */
6889
6890 /**
6891 * @enum rd_kafka_ConfigSource_t
6892 *
6893 * @brief Apache Kafka config sources.
6894 *
6895 * @remark These entities relate to the cluster, not the local client.
6896 *
6897 * @sa rd_kafka_conf_set() et al. for local client configuration.
6898 */
6899 typedef enum rd_kafka_ConfigSource_t {
6900 /** Source unknown, e.g., in the ConfigEntry used for alter requests
6901 * where source is not set */
6902 RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
6903 /** Dynamic topic config that is configured for a specific topic */
6904 RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
6905 /** Dynamic broker config that is configured for a specific broker */
6906 RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
6907 /** Dynamic broker config that is configured as default for all
6908 * brokers in the cluster */
6909 RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
6910 /** Static broker config provided as broker properties at startup
6911 * (e.g. from server.properties file) */
6912 RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
6913 /** Built-in default configuration for configs that have a
6914 * default value */
6915 RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
6916
6917 /** Number of source types defined */
6918 RD_KAFKA_CONFIG_SOURCE__CNT,
6919 } rd_kafka_ConfigSource_t;
6920
6921
6922 /**
6923 * @returns a string representation of the \p confsource.
6924 */
6925 RD_EXPORT const char *
6926 rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
6927
6928
6929 /*! Apache Kafka configuration entry. */
6930 typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
6931
6932 /**
6933 * @returns the configuration property name
6934 */
6935 RD_EXPORT const char *
6936 rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
6937
6938 /**
6939 * @returns the configuration value, may be NULL for sensitive or unset
6940 * properties.
6941 */
6942 RD_EXPORT const char *
6943 rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
6944
6945 /**
6946 * @returns the config source.
6947 */
6948 RD_EXPORT rd_kafka_ConfigSource_t
6949 rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
6950
6951 /**
6952 * @returns 1 if the config property is read-only on the broker, else 0.
6953 * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
6954 */
6955 RD_EXPORT int
6956 rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
6957
6958 /**
6959 * @returns 1 if the config property is set to its default value on the broker,
6960 * else 0.
6961 * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
6962 */
6963 RD_EXPORT int
6964 rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
6965
6966 /**
6967 * @returns 1 if the config property contains sensitive information (such as
6968 * security configuration), else 0.
6969 * @remark An application should take care not to include the value of
6970 * sensitive configuration entries in its output.
6971 * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
6972 */
6973 RD_EXPORT int
6974 rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
6975
6976 /**
6977 * @returns 1 if this entry is a synonym, else 0.
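 *
 * For example, a hypothetical sketch of printing a non-synonym \c entry
 * from a DescribeConfigs result while masking sensitive values:
 * @code
 * if (!rd_kafka_ConfigEntry_is_synonym(entry)) {
 *     const char *val = rd_kafka_ConfigEntry_value(entry);
 *     printf("%s = %s\n", rd_kafka_ConfigEntry_name(entry),
 *            rd_kafka_ConfigEntry_is_sensitive(entry) || !val
 *                ? "(hidden)" : val);
 * }
 * @endcode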
6978 */
6979 RD_EXPORT int
6980 rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
6981
6982
6983 /**
6984 * @returns the synonym config entry array.
6985 *
6986 * @param entry Entry to get synonyms for.
6987 * @param cntp is updated to the number of elements in the array.
6988 *
6989 * @remark The lifetime of the returned entry is the same as \p entry.
6990 * @remark Shall only be used on a DescribeConfigs result,
6991 * otherwise returns NULL.
6992 */
6993 RD_EXPORT const rd_kafka_ConfigEntry_t **
6994 rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry,
6995 size_t *cntp);
6996
6997
6998
6999 /**
7000 * @enum rd_kafka_ResourceType_t
7001 * @brief Apache Kafka resource types
7002 */
7003 typedef enum rd_kafka_ResourceType_t {
7004 RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */
7005 RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */
7006 RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */
7007 RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */
7008 RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */
7009 RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */
7010 } rd_kafka_ResourceType_t;
7011
7012 /**
7013 * @enum rd_kafka_ResourcePatternType_t
7014 * @brief Apache Kafka pattern types
7015 */
7016 typedef enum rd_kafka_ResourcePatternType_t {
7017 /** Unknown */
7018 RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
7019 /** Any (used for lookups) */
7020 RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
7021 /** Match: will perform pattern matching */
7022 RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
7023 /** Literal: A literal resource name */
7024 RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
7025 /** Prefixed: A prefixed resource name */
7026 RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
7027 RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
7028 } rd_kafka_ResourcePatternType_t;
7029
7030 /**
7031 * @returns a string representation of the \p resource_pattern_type
7032 */
7033 RD_EXPORT const char *rd_kafka_ResourcePatternType_name(
7034 rd_kafka_ResourcePatternType_t resource_pattern_type);
7035
7036 /**
7037 * @returns a string representation of the \p restype
7038 */
7039 RD_EXPORT const char *
7040 rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
7041
7042 /*! Apache Kafka configuration resource. */
7043 typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
7044
7045
7046 /**
7047 * @brief Create new ConfigResource object.
7048 *
7049 * @param restype The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC)
7050 * @param resname The resource name (e.g., the topic name)
7051 *
7052 * @returns a newly allocated object
7053 */
7054 RD_EXPORT rd_kafka_ConfigResource_t *
7055 rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype,
7056 const char *resname);
7057
7058 /**
7059 * @brief Destroy and free a ConfigResource object previously created with
7060 * rd_kafka_ConfigResource_new()
7061 */
7062 RD_EXPORT void
7063 rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
7064
7065
7066 /**
7067 * @brief Helper function to destroy all ConfigResource objects in
7068 * the \p configs array (of \p config_cnt elements).
7069 * The array itself is not freed.
7070 */
7071 RD_EXPORT void
7072 rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config,
7073 size_t config_cnt);
7074
7075
7076 /**
7077 * @brief Set configuration name value pair.
7078 *
7079 * @param config ConfigResource to set config property on.
7080 * @param name Configuration name, depends on resource type.
7081 * @param value Configuration value, depends on resource type and \p name.
7082 * Set to \c NULL to revert configuration value to default.
7083 *
7084 * This will overwrite the current value.
7085 *
7086 * @returns RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource,
7087 * or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
7088 */
7089 RD_EXPORT rd_kafka_resp_err_t
7090 rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config,
7091 const char *name,
7092 const char *value);
7093
7094
7095 /**
7096 * @brief Get an array of config entries from a ConfigResource object.
7097 *
7098 * The returned object life-times are the same as the \p config object.
7099 *
7100 * @param config ConfigResource to get configs from.
7101 * @param cntp is updated to the number of elements in the array.
7102 */
7103 RD_EXPORT const rd_kafka_ConfigEntry_t **
7104 rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config,
7105 size_t *cntp);
7106
7107
7108
7109 /**
7110 * @returns the ResourceType for \p config
7111 */
7112 RD_EXPORT rd_kafka_ResourceType_t
7113 rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
7114
7115 /**
7116 * @returns the name for \p config
7117 */
7118 RD_EXPORT const char *
7119 rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
7120
7121 /**
7122 * @returns the error for this resource from an AlterConfigs request
7123 */
7124 RD_EXPORT rd_kafka_resp_err_t
7125 rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
7126
7127 /**
7128 * @returns the error string for this resource from an AlterConfigs
7129 * request, or NULL if no error.
7130 */
7131 RD_EXPORT const char *
7132 rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
7133
7134
7135 /*
7136 * AlterConfigs - alter cluster configuration.
7137 *
7138 */
7139
7140
7141 /**
7142 * @brief Update the configuration for the specified resources.
7143 * Updates are not transactional so they may succeed for a subset
7144 * of the provided resources while the others fail.
7145 * The configuration for a particular resource is updated atomically,
7146 * replacing values using the provided ConfigEntry objects and reverting
7147 * unspecified entries to their default values.
7148 *
7149 * @remark Requires broker version >=0.11.0.0
7150 *
7151 * @warning AlterConfigs will replace all existing configuration for
7152 * the provided resources with the new configuration given,
7153 * reverting all other configuration to their default values.
7154 *
7155 * @remark Multiple resources and resource types may be set, but at most one
7156 * resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call
7157 * since these resource requests must be sent to the broker specified
7158 * in the resource.
7159 *
7160 */
7161 RD_EXPORT
7162 void rd_kafka_AlterConfigs(rd_kafka_t *rk,
7163 rd_kafka_ConfigResource_t **configs,
7164 size_t config_cnt,
7165 const rd_kafka_AdminOptions_t *options,
7166 rd_kafka_queue_t *rkqu);
7167
7168
7169 /*
7170 * AlterConfigs result type and methods
7171 */
7172
7173 /**
7174 * @brief Get an array of resource results from an AlterConfigs result.
7175 *
7176 * Use \c rd_kafka_ConfigResource_error() and
7177 * \c rd_kafka_ConfigResource_error_string() to extract per-resource error
7178 * results on the returned array elements.
7179 *
7180 * The returned object life-times are the same as the \p result object.
7181 *
7182 * @param result Result object to get resource results from.
7183 * @param cntp is updated to the number of elements in the array.
7184 *
7185 * @returns an array of ConfigResource elements, or NULL if not available.
7186 */
7187 RD_EXPORT const rd_kafka_ConfigResource_t **
7188 rd_kafka_AlterConfigs_result_resources(
7189 const rd_kafka_AlterConfigs_result_t *result,
7190 size_t *cntp);
7191
7192
7193
7194 /*
7195 * DescribeConfigs - retrieve cluster configuration.
7196 *
7197 */
7198
7199
7200 /**
7201 * @brief Get configuration for the specified resources in \p configs.
7202 *
7203 * The returned configuration includes default values and the
7204 * rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source()
7205 * methods may be used to distinguish them from user supplied values.
7206 *
7207 * The value of config entries where rd_kafka_ConfigEntry_is_sensitive()
7208 * is true will always be NULL to avoid disclosing sensitive
7209 * information, such as security settings.
7210 *
7211 * Configuration entries where rd_kafka_ConfigEntry_is_read_only()
7212 * is true can't be updated (with rd_kafka_AlterConfigs()).
7213 *
7214 * Synonym configuration entries are returned if the broker supports
7215 * it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms().
7216 *
7217 * @remark Requires broker version >=0.11.0.0
7218 *
7219 * @remark Multiple resources and resource types may be requested, but at most
7220 * one resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call
7221 * since these resource requests must be sent to the broker specified
7222 * in the resource.
7223 */
7224 RD_EXPORT
7225 void rd_kafka_DescribeConfigs(rd_kafka_t *rk,
7226 rd_kafka_ConfigResource_t **configs,
7227 size_t config_cnt,
7228 const rd_kafka_AdminOptions_t *options,
7229 rd_kafka_queue_t *rkqu);
7230
7231
7232
7233 /*
7234 * DescribeConfigs result type and methods
7235 */
7236
7237 /**
7238 * @brief Get an array of resource results from a DescribeConfigs result.
7239 *
7240 * The returned \p resources life-time is the same as the \p result object.
7241 *
7242 * @param result Result object to get resource results from.
7243 * @param cntp is updated to the number of elements in the array.
7244 */
7245 RD_EXPORT const rd_kafka_ConfigResource_t **
7246 rd_kafka_DescribeConfigs_result_resources(
7247 const rd_kafka_DescribeConfigs_result_t *result,
7248 size_t *cntp);
7249
7250
7251 /*
7252 * DeleteRecords - delete records (messages) from partitions
7253 *
7254 *
7255 */
7256
7257 /*! Represents records to be deleted */
7258 typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
7259
7260 /**
7261 * @brief Create a new DeleteRecords object. This object is later passed to
7262 * rd_kafka_DeleteRecords().
7263 *
7264 * \p before_offsets must contain \c topic, \c partition, and
7265 * \c offset, where \c offset is the offset before which the messages will
7266 * be deleted (exclusive).
7267 * Set \c offset to RD_KAFKA_OFFSET_END (high-watermark) in order to
7268 * delete all data in the partition.
7269 *
7270 * @param before_offsets For each partition delete all messages up to but not
7271 * including the specified offset.
7272 *
7273 * @returns a new allocated DeleteRecords object.
7274 * Use rd_kafka_DeleteRecords_destroy() to free object when done.
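 *
 * For example, a hypothetical sketch of deleting everything before
 * offset 1000 in partition 0 of \c "mytopic":
 * @code
 * rd_kafka_topic_partition_list_t *offsets =
 *     rd_kafka_topic_partition_list_new(1);
 * rd_kafka_topic_partition_list_add(offsets, "mytopic", 0)->offset = 1000;
 *
 * rd_kafka_DeleteRecords_t *del_records = rd_kafka_DeleteRecords_new(offsets);
 * // ... pass to rd_kafka_DeleteRecords() and await the result event ...
 * @endcode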
7275 */
7276 RD_EXPORT rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(
7277 const rd_kafka_topic_partition_list_t *before_offsets);
7278
7279 /**
7280 * @brief Destroy and free a DeleteRecords object previously created with
7281 * rd_kafka_DeleteRecords_new()
7282 */
7283 RD_EXPORT void
7284 rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
7285
7286 /**
7287 * @brief Helper function to destroy all DeleteRecords objects in
7288 * the \p del_records array (of \p del_record_cnt elements).
7289 * The array itself is not freed.
7290 */
7291 RD_EXPORT void
7292 rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records,
7293 size_t del_record_cnt);
7294
7295 /**
7296 * @brief Delete records (messages) in topic partitions older than the
7297 * offsets provided.
7298 *
7299 * @param rk Client instance.
7300 * @param del_records The offsets to delete (up to).
7301 * Currently only one DeleteRecords_t (but containing
7302 * multiple offsets) is supported.
7303 * @param del_record_cnt The number of elements in del_records, must be 1.
7304 * @param options Optional admin options, or NULL for defaults.
7305 * @param rkqu Queue to emit result on.
7306 *
7307 * Supported admin options:
7308 * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds.
7309 * Controls how long the brokers will wait for records to be deleted.
7310 * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms.
7311 * Controls how long \c rdkafka will wait for the request to complete.
7312 *
7313 * @remark The result event type emitted on the supplied queue is of type
7314 * \c RD_KAFKA_EVENT_DELETERECORDS_RESULT
7315 */
7316 RD_EXPORT void rd_kafka_DeleteRecords(rd_kafka_t *rk,
7317 rd_kafka_DeleteRecords_t **del_records,
7318 size_t del_record_cnt,
7319 const rd_kafka_AdminOptions_t *options,
7320 rd_kafka_queue_t *rkqu);
7321
7322
7323 /*
7324 * DeleteRecords result type and methods
7325 */
7326
7327 /**
7328 * @brief Get a list of topic and partition results from a DeleteRecords result.
7329 * The returned objects will contain \c topic, \c partition, \c offset
7330 * and \c err. \c offset will be set to the post-deletion low-watermark
7331 * (smallest available offset of all live replicas). \c err will be set
7332 * per-partition if deletion failed.
7333 *
7334 * The returned object's life-time is the same as the \p result object.
7335 */
7336 RD_EXPORT const rd_kafka_topic_partition_list_t *
7337 rd_kafka_DeleteRecords_result_offsets(
7338 const rd_kafka_DeleteRecords_result_t *result);
7339
7340 /*
7341 * DeleteGroups - delete groups from cluster
7342 *
7343 *
7344 */
7345
7346 /*! Represents a group to be deleted. */
7347 typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
7348
7349 /**
7350 * @brief Create a new DeleteGroup object. This object is later passed to
7351 * rd_kafka_DeleteGroups().
7352 *
7353 * @param group Name of group to delete.
7354 *
7355 * @returns a new allocated DeleteGroup object.
7356 * Use rd_kafka_DeleteGroup_destroy() to free object when done.
7357 */
7358 RD_EXPORT rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
7359
7360 /**
7361 * @brief Destroy and free a DeleteGroup object previously created with
7362 * rd_kafka_DeleteGroup_new()
7363 */
7364 RD_EXPORT void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
7365
7366 /**
7367 * @brief Helper function to destroy all DeleteGroup objects in
7368 * the \p del_groups array (of \p del_group_cnt elements).
7369 * The array itself is not freed.
7370 */
7371 RD_EXPORT void
7372 rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups,
7373 size_t del_group_cnt);
7374
7375 /**
7376 * @brief Delete groups from cluster as specified by the \p del_groups
7377 * array of size \p del_group_cnt elements.
7378 *
7379 * @param rk Client instance.
7380 * @param del_groups Array of groups to delete.
7381 * @param del_group_cnt Number of elements in \p del_groups array.
7382 * @param options Optional admin options, or NULL for defaults.
7383 * @param rkqu Queue to emit result on.
7384 *
7385 * @remark The result event type emitted on the supplied queue is of type
7386 * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT
7387 */
7388 RD_EXPORT
7389 void rd_kafka_DeleteGroups(rd_kafka_t *rk,
7390 rd_kafka_DeleteGroup_t **del_groups,
7391 size_t del_group_cnt,
7392 const rd_kafka_AdminOptions_t *options,
7393 rd_kafka_queue_t *rkqu);
7394
7395
7396
7397 /*
7398 * DeleteGroups result type and methods
7399 */
7400
7401 /**
7402 * @brief Get an array of group results from a DeleteGroups result.
7403 *
7404 * The returned groups life-time is the same as the \p result object.
7405 *
7406 * @param result Result to get group results from.
7407 * @param cntp is updated to the number of elements in the array.
7408 */
7409 RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(
7410 const rd_kafka_DeleteGroups_result_t *result,
7411 size_t *cntp);
7412
7413
7414 /*
7415 * DeleteConsumerGroupOffsets - delete committed offsets from a group
7416 *
7417 *
7418 */
7419
7420 /*! Represents consumer group committed offsets to be deleted. */
7421 typedef struct rd_kafka_DeleteConsumerGroupOffsets_s
7422 rd_kafka_DeleteConsumerGroupOffsets_t;
7423
7424 /**
7425 * @brief Create a new DeleteConsumerGroupOffsets object.
7426 * This object is later passed to rd_kafka_DeleteConsumerGroupOffsets().
7427 *
7428 * @param group Consumer group id.
7429 * @param partitions Partitions to delete committed offsets for.
7430 * Only the topic and partition fields are used.
7431 *
7432 * @returns a new allocated DeleteConsumerGroupOffsets object.
7433 * Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free
7434 * object when done.
7435 */
7436 RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t *
7437 rd_kafka_DeleteConsumerGroupOffsets_new(
7438 const char *group,
7439 const rd_kafka_topic_partition_list_t *partitions);
7440
7441 /**
7442 * @brief Destroy and free a DeleteConsumerGroupOffsets object previously
7443 * created with rd_kafka_DeleteConsumerGroupOffsets_new()
7444 */
7445 RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy(
7446 rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
7447
7448 /**
7449 * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in
7450 * the \p del_grpoffsets array (of \p del_grpoffsets_cnt elements).
7451 * The array itself is not freed.
7452 */
7453 RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(
7454 rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
7455 size_t del_grpoffset_cnt);
7456
7457 /**
7458 * @brief Delete committed offsets for a set of partitions in a consumer
7459 * group. This will succeed at the partition level only if the group
7460 * is not actively subscribed to the corresponding topic.
7461 *
7462 * @param rk Client instance.
7463 * @param del_grpoffsets Array of group committed offsets to delete.
7464 * MUST contain exactly one element.
7465 * @param del_grpoffsets_cnt Number of elements in \p del_grpoffsets array.
7466 * MUST always be 1. 7467 * @param options Optional admin options, or NULL for defaults. 7468 * @param rkqu Queue to emit result on. 7469 * 7470 * @remark The result event type emitted on the supplied queue is of type 7471 * \c RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 7472 * 7473 * @remark The current implementation only supports one group per invocation. 7474 */ 7475 RD_EXPORT 7476 void rd_kafka_DeleteConsumerGroupOffsets( 7477 rd_kafka_t *rk, 7478 rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, 7479 size_t del_grpoffsets_cnt, 7480 const rd_kafka_AdminOptions_t *options, 7481 rd_kafka_queue_t *rkqu); 7482 7483 7484 7485 /* 7486 * DeleteConsumerGroupOffsets result type and methods 7487 */ 7488 7489 /** 7490 * @brief Get an array of results from a DeleteConsumerGroupOffsets result. 7491 * 7492 * The returned groups life-time is the same as the \p result object. 7493 * 7494 * @param result Result to get group results from. 7495 * @param cntp is updated to the number of elements in the array. 7496 */ 7497 RD_EXPORT const rd_kafka_group_result_t ** 7498 rd_kafka_DeleteConsumerGroupOffsets_result_groups( 7499 const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, 7500 size_t *cntp); 7501 7502 /** 7503 * @brief ACL Binding is used to create access control lists. 7504 * 7505 * 7506 */ 7507 typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t; 7508 7509 /** 7510 * @brief ACL Binding filter is used to filter access control lists. 7511 * 7512 */ 7513 typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t; 7514 7515 /** 7516 * @returns the error object for the given acl result, or NULL on success. 7517 */ 7518 RD_EXPORT const rd_kafka_error_t * 7519 rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres); 7520 7521 7522 /** 7523 * @name AclOperation 7524 * @{ 7525 */ 7526 7527 /** 7528 * @enum rd_kafka_AclOperation_t 7529 * @brief Apache Kafka ACL operation types. 7530 */ 7531 typedef enum rd_kafka_AclOperation_t { 7532 RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */ 7533 RD_KAFKA_ACL_OPERATION_ANY = 7534 1, /**< In a filter, matches any AclOperation */ 7535 RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */ 7536 RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */ 7537 RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */ 7538 RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */ 7539 RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */ 7540 RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */ 7541 RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */ 7542 RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 7543 9, /**< CLUSTER_ACTION operation */ 7544 RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 7545 10, /**< DESCRIBE_CONFIGS operation */ 7546 RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 7547 11, /**< ALTER_CONFIGS operation */ 7548 RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 7549 12, /**< IDEMPOTENT_WRITE operation */ 7550 RD_KAFKA_ACL_OPERATION__CNT 7551 } rd_kafka_AclOperation_t; 7552 7553 /** 7554 * @returns a string representation of the \p acl_operation 7555 */ 7556 RD_EXPORT const char * 7557 rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation); 7558 7559 /**@}*/ 7560 7561 /** 7562 * @name AclPermissionType 7563 * @{ 7564 */ 7565 7566 /** 7567 * @enum rd_kafka_AclPermissionType_t 7568 * @brief Apache Kafka ACL permission types. 
7569 */
7570 typedef enum rd_kafka_AclPermissionType_t {
7571 RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0, /**< Unknown */
7572 RD_KAFKA_ACL_PERMISSION_TYPE_ANY =
7573 1, /**< In a filter, matches any AclPermissionType */
7574 RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2, /**< Disallows access */
7575 RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3, /**< Grants access. */
7576 RD_KAFKA_ACL_PERMISSION_TYPE__CNT
7577 } rd_kafka_AclPermissionType_t;
7578
7579 /**
7580 * @returns a string representation of the \p acl_permission_type
7581 */
7582 RD_EXPORT const char *rd_kafka_AclPermissionType_name(
7583 rd_kafka_AclPermissionType_t acl_permission_type);
7584
7585 /**@}*/
7586
7587 /**
7588 * @brief Create a new AclBinding object. This object is later passed to
7589 * rd_kafka_CreateAcls().
7590 *
7591 * @param restype The ResourceType.
7592 * @param name The resource name.
7593 * @param resource_pattern_type The pattern type.
7594 * @param principal A principal, following the Kafka specification.
7595 * @param host A hostname or IP address.
7596 * @param operation A Kafka operation.
7597 * @param permission_type A Kafka permission type.
7598 * @param errstr An error string for returning errors or NULL to not use it.
7599 * @param errstr_size The \p errstr size or 0 to not use it.
7600 *
7601 * @returns a new allocated AclBinding object, or NULL if the input parameters
7602 * are invalid.
7603 * Use rd_kafka_AclBinding_destroy() to free object when done.
7604 */
7605 RD_EXPORT rd_kafka_AclBinding_t *
7606 rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
7607 const char *name,
7608 rd_kafka_ResourcePatternType_t resource_pattern_type,
7609 const char *principal,
7610 const char *host,
7611 rd_kafka_AclOperation_t operation,
7612 rd_kafka_AclPermissionType_t permission_type,
7613 char *errstr,
7614 size_t errstr_size);
7615
7616 /**
7617 * @brief Create a new AclBindingFilter object. This object is later passed to
7618 * rd_kafka_DescribeAcls() or
7619 * rd_kafka_DeleteAcls() in order to filter
7620 * the acls to retrieve or to delete.
7621 * Use the same rd_kafka_AclBinding functions to query or destroy it.
7622 *
7623 * @param restype The ResourceType or \c RD_KAFKA_RESOURCE_ANY if
7624 * not filtering by this field.
7625 * @param name The resource name or NULL if not filtering by this field.
7626 * @param resource_pattern_type The pattern type or \c
7627 * RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field.
7628 * @param principal A principal or NULL if not filtering by this field.
7629 * @param host A hostname or IP address, or NULL if not filtering by this field.
7630 * @param operation A Kafka operation or \c RD_KAFKA_ACL_OPERATION_ANY if not
7631 * filtering by this field.
7632 * @param permission_type A Kafka permission type or \c
7633 * RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field.
7634 * @param errstr An error string for returning errors or NULL to not use it.
7635 * @param errstr_size The \p errstr size or 0 to not use it.
7636 *
7637 * @returns a new allocated AclBindingFilter object, or NULL if the input
7638 * parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when
7639 * done.
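 *
 * For example, a hypothetical filter matching all ACLs that apply to
 * topic \c "mytopic":
 * @code
 * char errstr[512];
 * rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
 *     RD_KAFKA_RESOURCE_TOPIC, "mytopic",
 *     RD_KAFKA_RESOURCE_PATTERN_MATCH, NULL, NULL,
 *     RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
 *     errstr, sizeof(errstr));
 * @endcode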
7640 */
7641 RD_EXPORT rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
7642 rd_kafka_ResourceType_t restype,
7643 const char *name,
7644 rd_kafka_ResourcePatternType_t resource_pattern_type,
7645 const char *principal,
7646 const char *host,
7647 rd_kafka_AclOperation_t operation,
7648 rd_kafka_AclPermissionType_t permission_type,
7649 char *errstr,
7650 size_t errstr_size);
7651
7652 /**
7653 * @returns the resource type for the given acl binding.
7654 */
7655 RD_EXPORT rd_kafka_ResourceType_t
7656 rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
7657
7658 /**
7659 * @returns the resource name for the given acl binding.
7660 *
7661 * @remark lifetime of the returned string is the same as the \p acl.
7662 */
7663 RD_EXPORT const char *
7664 rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
7665
7666 /**
7667 * @returns the principal for the given acl binding.
7668 *
7669 * @remark lifetime of the returned string is the same as the \p acl.
7670 */
7671 RD_EXPORT const char *
7672 rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
7673
7674 /**
7675 * @returns the host for the given acl binding.
7676 *
7677 * @remark lifetime of the returned string is the same as the \p acl.
7678 */
7679 RD_EXPORT const char *
7680 rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
7681
7682 /**
7683 * @returns the acl operation for the given acl binding.
7684 */
7685 RD_EXPORT rd_kafka_AclOperation_t
7686 rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
7687
7688 /**
7689 * @returns the permission type for the given acl binding.
7690 */
7691 RD_EXPORT rd_kafka_AclPermissionType_t
7692 rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
7693
7694 /**
7695 * @returns the resource pattern type for the given acl binding.
7696 */
7697 RD_EXPORT rd_kafka_ResourcePatternType_t
7698 rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
7699
7700 /**
7701 * @returns the error object for the given acl binding, or NULL on success.
7702 */
7703 RD_EXPORT const rd_kafka_error_t *
7704 rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
7705
7706
7707 /**
7708 * @brief Destroy and free an AclBinding object previously created with
7709 * rd_kafka_AclBinding_new()
7710 */
7711 RD_EXPORT void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
7712
7713
7714 /**
7715 * @brief Helper function to destroy all AclBinding objects in
7716 * the \p acl_bindings array (of \p acl_bindings_cnt elements).
7717 * The array itself is not freed.
7718 */
7719 RD_EXPORT void
7720 rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
7721 size_t acl_bindings_cnt);
7722
7723 /**
7724 * @brief Get an array of acl results from a CreateAcls result.
7725 *
7726 * The returned \p acl result life-time is the same as the \p result object.
7727 * @param result CreateAcls result to get acl results from.
7728 * @param cntp is updated to the number of elements in the array.
7729 */
7730 RD_EXPORT const rd_kafka_acl_result_t **
7731 rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
7732 size_t *cntp);
7733
7734 /**
7735 * @brief Create acls as specified by the \p new_acls
7736 * array of size \p new_acls_cnt elements.
7737 *
7738 * @param rk Client instance.
7739 * @param new_acls Array of new acls to create.
7740 * @param new_acls_cnt Number of elements in \p new_acls array.
7741 * @param options Optional admin options, or NULL for defaults.
7742 * @param rkqu Queue to emit result on.
7743 *
7744 * Supported admin options:
7745 * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
7746 *
7747 * @remark The result event type emitted on the supplied queue is of type
7748 * \c RD_KAFKA_EVENT_CREATEACLS_RESULT
7749 */
7750 RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk,
7751 rd_kafka_AclBinding_t **new_acls,
7752 size_t new_acls_cnt,
7753 const rd_kafka_AdminOptions_t *options,
7754 rd_kafka_queue_t *rkqu);
7755
7756 /**
7757 * @section DescribeAcls - describe access control lists.
7758 *
7759 *
7760 */
7761
7762 /**
7763 * @brief Get an array of resource results from a DescribeAcls result.
7764 *
7765 * The returned \p resources life-time is the same as the \p result object.
7766 * @param result DescribeAcls result to get acls from.
7767 * @param cntp is updated to the number of elements in the array.
7768 */
7769 RD_EXPORT const rd_kafka_AclBinding_t **
7770 rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
7771 size_t *cntp);
7772
7773 /**
7774 * @brief Describe acls matching the filter provided in \p acl_filter.
7775 *
7776 * @param rk Client instance.
7777 * @param acl_filter Filter for the returned acls.
7778 * @param options Optional admin options, or NULL for defaults.
7779 * @param rkqu Queue to emit result on.
7780 *
7781 * Supported admin options:
7782 * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
7783 *
7784 * @remark The result event type emitted on the supplied queue is of type
7785 * \c RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
7786 */
7787 RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk,
7788 rd_kafka_AclBindingFilter_t *acl_filter,
7789 const rd_kafka_AdminOptions_t *options,
7790 rd_kafka_queue_t *rkqu);
7791
7792 /**
7793 * @section DeleteAcls - delete access control lists.
7794 *
7795 *
7796 */
7797
7798 typedef struct rd_kafka_DeleteAcls_result_response_s
7799 rd_kafka_DeleteAcls_result_response_t;
7800
7801 /**
7802 * @brief Get an array of DeleteAcls result responses from a DeleteAcls result.
7803 *
7804 * The returned \p responses life-time is the same as the \p result object.
7805 * @param result DeleteAcls result to get responses from.
7806 * @param cntp is updated to the number of elements in the array.
7807 */
7808 RD_EXPORT const rd_kafka_DeleteAcls_result_response_t **
7809 rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
7810 size_t *cntp);
7811
7812 /**
7813 * @returns the error object for the given DeleteAcls result response,
7814 * or NULL on success.
7815 */
7816 RD_EXPORT const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
7817 const rd_kafka_DeleteAcls_result_response_t *result_response);
7818
7819
7820 /**
7821 * @returns the matching acls array for the given DeleteAcls result response.
7822 *
7823 * @remark lifetime of the returned acl bindings is the same as the \p
7824 * result_response.
7825 */
7826 RD_EXPORT const rd_kafka_AclBinding_t **
7827 rd_kafka_DeleteAcls_result_response_matching_acls(
7828 const rd_kafka_DeleteAcls_result_response_t *result_response,
7829 size_t *matching_acls_cntp);
7830
7831 /**
7832 * @brief Delete acls matching the filters provided in \p del_acls
7833 * array of size \p del_acls_cnt.
7834 *
7835 * @param rk Client instance.
7836 * @param del_acls Filters for the acls to delete.
7837 * @param del_acls_cnt Number of elements in \p del_acls array.
7838 * @param options Optional admin options, or NULL for defaults.
7839 * @param rkqu Queue to emit result on.
7840 *
7841 * Supported admin options:
7842 * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
7843 *
7844 * @remark The result event type emitted on the supplied queue is of type
7845 * \c RD_KAFKA_EVENT_DELETEACLS_RESULT
7846 */
7847 RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk,
7848 rd_kafka_AclBindingFilter_t **del_acls,
7849 size_t del_acls_cnt,
7850 const rd_kafka_AdminOptions_t *options,
7851 rd_kafka_queue_t *rkqu);
7852
7853 /**@}*/
7854
7855 /**
7856 * @name Security APIs
7857 * @{
7858 *
7859 */
7860
7861 /**
7862 * @brief Set SASL/OAUTHBEARER token and metadata
7863 *
7864 * @param rk Client instance.
7865 * @param token_value the mandatory token value to set, often (but not
7866 * necessarily) a JWS compact serialization as per
7867 * https://tools.ietf.org/html/rfc7515#section-3.1.
7868 * @param md_lifetime_ms when the token expires, in terms of the number of
7869 * milliseconds since the epoch.
7870 * @param md_principal_name the mandatory Kafka principal name associated
7871 * with the token.
7872 * @param extensions optional SASL extensions key-value array with
7873 * \p extension_size elements (number of keys * 2), where [i] is the key and
7874 * [i+1] is the key's value, to be communicated to the broker
7875 * as additional key-value pairs during the initial client response as per
7876 * https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are
7877 * copied.
7878 * @param extension_size the number of SASL extension keys plus values,
7879 * which must be a non-negative multiple of 2.
7880 * @param errstr A human readable error string (nul-terminated) is written to
7881 * this location that must be of at least \p errstr_size bytes.
7882 * The \p errstr is only written in case of error.
7883 * @param errstr_size Writable size in \p errstr.
7884 *
7885 * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
7886 * this method upon success. The extension keys must not include the reserved
7887 * key "`auth`", and all extension keys and values must conform to the required
7888 * format as per https://tools.ietf.org/html/rfc7628#section-3.1:
7889 *
7890 * key = 1*(ALPHA)
7891 * value = *(VCHAR / SP / HTAB / CR / LF )
7892 *
7893 * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise \p errstr set
7894 * and:<br>
7895 * \c RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are
7896 * invalid;<br>
7897 * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
7898 * supported by this build;<br>
7899 * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is
7900 * not configured as the client's authentication mechanism.<br>
7901 *
7902 * @sa rd_kafka_oauthbearer_set_token_failure
7903 * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb
7904 */
7905 RD_EXPORT
7906 rd_kafka_resp_err_t
7907 rd_kafka_oauthbearer_set_token(rd_kafka_t *rk,
7908 const char *token_value,
7909 int64_t md_lifetime_ms,
7910 const char *md_principal_name,
7911 const char **extensions,
7912 size_t extension_size,
7913 char *errstr,
7914 size_t errstr_size);
7915
7916 /**
7917 * @brief SASL/OAUTHBEARER token refresh failure indicator.
7918 *
7919 * @param rk Client instance.
7920 * @param errstr mandatory human readable error reason for failing to acquire
7921 * a token.
7922 *
7923 * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
7924 * this method upon failure.
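 *
 * For example, a hypothetical sketch from within a token refresh
 * callback whose token acquisition failed:
 * @code
 * rd_kafka_oauthbearer_set_token_failure(
 *     rk, "Failed to acquire token: token endpoint unreachable");
 * @endcode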
7925 *
7926 * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:<br>
7927 * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
7928 * supported by this build;<br>
7929 * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is
7930 * not configured as the client's authentication mechanism,<br>
7931 * \c RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied.
7932 *
7933 * @sa rd_kafka_oauthbearer_set_token
7934 * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb
7935 */
7936 RD_EXPORT
7937 rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
7938 const char *errstr);
7939
7940 /**@}*/
7941
7942
7943 /**
7944 * @name Transactional producer API
7945 *
7946 * The transactional producer operates on top of the idempotent producer,
7947 * and provides full exactly-once semantics (EOS) for Apache Kafka when used
7948 * with the transaction-aware consumer (\c isolation.level=read_committed).
7949 *
7950 * A producer instance is configured for transactions by setting the
7951 * \c transactional.id to an identifier unique for the application. This
7952 * id will be used to fence stale transactions from previous instances of
7953 * the application, typically following an outage or crash.
7954 *
7955 * After creating the transactional producer instance using rd_kafka_new()
7956 * the transactional state must be initialized by calling
7957 * rd_kafka_init_transactions(). This is a blocking call that will
7958 * acquire a runtime producer id from the transaction coordinator broker
7959 * as well as abort any stale transactions and fence any still running producer
7960 * instances with the same \c transactional.id.
7961 *
7962 * Once transactions are initialized the application may begin a new
7963 * transaction by calling rd_kafka_begin_transaction().
7964 * A producer instance may only have a single ongoing transaction.
7965 *
7966 * Any messages produced after the transaction has been started will
7967 * belong to the ongoing transaction and will be committed or aborted
7968 * atomically.
7969 * It is not permitted to produce messages outside a transaction
7970 * boundary, e.g., before rd_kafka_begin_transaction() or after
7971 * rd_kafka_commit_transaction(), rd_kafka_abort_transaction(), or after
7972 * the current transaction has failed.
7973 *
7974 * If consumed messages are used as input to the transaction, the consumer
7975 * instance must be configured with \c enable.auto.commit set to \c false.
7976 * To commit the consumed offsets along with the transaction pass the
7977 * list of consumed partitions and the last offset processed + 1 to
7978 * rd_kafka_send_offsets_to_transaction() prior to committing the transaction.
7979 * This allows an aborted transaction to be restarted using the previously
7980 * committed offsets.
7981 *
7982 * To commit the produced messages, and any consumed offsets, to the
7983 * current transaction, call rd_kafka_commit_transaction().
7984 * This call will block until the transaction has been fully committed or
7985 * failed (typically due to fencing by a newer producer instance).
7986 *
7987 * Alternatively, if processing fails, or an abortable transaction error is
7988 * raised, the transaction needs to be aborted by calling
7989 * rd_kafka_abort_transaction() which marks any produced messages and
7990 * offset commits as aborted.
7991 *
7992 * After the current transaction has been committed or aborted a new
7993 * transaction may be started by calling rd_kafka_begin_transaction() again.
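 *
 * A minimal sketch of this flow (assuming \c rk was created with
 * \c transactional.id set; per-call error handling omitted for brevity,
 * see the error handling example below):
 * @code
 * rd_kafka_error_t *error;
 *
 * error = rd_kafka_init_transactions(rk, 30 * 1000);
 * // ... check and destroy error ...
 * error = rd_kafka_begin_transaction(rk);
 * // ... check and destroy error ...
 *
 * // Messages produced here belong to the ongoing transaction.
 * rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
 *                   RD_KAFKA_V_VALUE("hello", 5), RD_KAFKA_V_END);
 *
 * error = rd_kafka_commit_transaction(rk, 60 * 1000);
 * // ... check and destroy error ...
 * @endcode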
7994  *
7995  * @par Retriable errors
7996  * Some error cases allow the attempted operation to be retried; this is
7997  * indicated by the error object having the retriable flag set which can
7998  * be detected by calling rd_kafka_error_is_retriable().
7999  * When this flag is set the application may retry the operation immediately
8000  * or preferably after a short grace period (to avoid busy-looping).
8001  * Retriable errors include timeouts, broker transport failures, etc.
8002  *
8003  * @par Abortable errors
8004  * An ongoing transaction may fail permanently due to various errors,
8005  * such as the transaction coordinator becoming unavailable, write failures
8006  * to the Apache Kafka log, under-replicated partitions, etc.
8007  * At this point the producer application must abort the current transaction
8008  * using rd_kafka_abort_transaction() and optionally start a new transaction
8009  * by calling rd_kafka_begin_transaction().
8010  * Whether an error is abortable or not is detected by calling
8011  * rd_kafka_error_txn_requires_abort() on the returned error object.
8012  *
8013  * @par Fatal errors
8014  * While the underlying idempotent producer will typically only raise
8015  * fatal errors for unrecoverable cluster errors where the idempotency
8016  * guarantees can't be maintained, most of these are treated as abortable by
8017  * the transactional producer since transactions may be aborted and retried
8018  * in their entirety.
8019  * The transactional producer, on the other hand, introduces a set of
8020  * additional fatal errors which the application needs to handle by shutting
8021  * down the producer and terminating. There is no way for a producer instance
8022  * to recover from fatal errors.
8023  * Whether an error is fatal or not is detected by calling
8024  * rd_kafka_error_is_fatal() on the returned error object or by checking
8025  * the global rd_kafka_fatal_error() code.
8026  * Fatal errors are raised by triggering the \c error_cb (see the
8027  * Fatal error chapter in INTRODUCTION.md for more information), and any
8028  * subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL
8029  * or have the fatal flag set (see rd_kafka_error_is_fatal()).
8030  * The originating fatal error code can be retrieved by calling
8031  * rd_kafka_fatal_error().
8032  *
8033  * @par Handling of other errors
8034  * For errors that have neither the retriable, abortable, nor fatal flag set
8035  * it is not always obvious how to handle them. While some of these errors
8036  * may be indicative of bugs in the application code, such as when
8037  * an invalid parameter is passed to a method, other errors might originate
8038  * from the broker and be passed through as-is to the application.
8039  * The general recommendation is to treat these errors, which have
8040  * neither the retriable nor abortable flag set, as fatal.
8041  *
8042  * @par Error handling example
8043  * @code
8044  *     rd_kafka_error_t *error;
8045  *
8046  * retry:
8047  *     error = rd_kafka_commit_transaction(producer, 10*1000);
8048  *     if (!error)
8049  *         return success;
8050  *     else if (rd_kafka_error_txn_requires_abort(error)) {
8051  *         do_abort_transaction_and_reset_inputs();
8052  *     } else if (rd_kafka_error_is_retriable(error)) {
8053  *         rd_kafka_error_destroy(error);
8054  *         goto retry;
8055  *     } else { // treat all other errors as fatal errors
8056  *         fatal_error(rd_kafka_error_string(error));
8057  *     }
8058  *     rd_kafka_error_destroy(error);
8059  * @endcode
8060  *
8061  *
8062  * @{
8063  */
8064 
8065 
8066 /**
8067  * @brief Initialize transactions for the producer instance.
8068  *
8069  * This function ensures any transactions initiated by previous instances
8070  * of the producer with the same \c transactional.id are completed.
8071  * If the previous instance failed with a transaction in progress the
8072  * previous transaction will be aborted.
8073  * This function needs to be called before any other transactional or
8074  * produce functions are called when the \c transactional.id is configured.
8075  *
8076  * If the last transaction had begun completion (following transaction commit)
8077  * but not yet finished, this function will await the previous transaction's
8078  * completion.
8079  *
8080  * When any previous transactions have been fenced this function
8081  * will acquire the internal producer id and epoch, used in all future
8082  * transactional messages issued by this producer instance.
8083  *
8084  * @param rk Producer instance.
8085  * @param timeout_ms The maximum time to block. On timeout the operation
8086  *                   may continue in the background, depending on state,
8087  *                   and it is okay to call init_transactions() again.
8088  *
8089  * @remark This function may block up to \p timeout_ms milliseconds.
8090  *
8091  * @returns NULL on success or an error object on failure.
8092  *          Check whether the returned error object permits retrying
8093  *          by calling rd_kafka_error_is_retriable(), or whether a fatal
8094  *          error has been raised by calling rd_kafka_error_is_fatal().
8095  *          Error codes:
8096  *          RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator
8097  *          could not be contacted within \p timeout_ms (retriable),
8098  *          RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction
8099  *          coordinator is not available (retriable),
8100  *          RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction
8101  *          would not complete within \p timeout_ms (retriable),
8102  *          RD_KAFKA_RESP_ERR__STATE if transactions have already been started
8103  *          or upon fatal error,
8104  *          RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not
8105  *          support transactions (<Apache Kafka 0.11), this also raises a
8106  *          fatal error,
8107  *          RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT if the configured
8108  *          \c transaction.timeout.ms is outside the broker-configured range,
8109  *          this also raises a fatal error,
8110  *          RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
8111  *          configured for the producer instance,
8112  *          RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
8113  *          or \p timeout_ms is out of range.
8114  *          Other error codes not listed here may be returned, depending on
8115  *          broker version.
8116  *
8117  * @remark The returned error object (if not NULL) must be destroyed with
8118  *         rd_kafka_error_destroy().
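 *
 * An illustrative call with a simple retry policy (a sketch only; the
 * choice of timeout, back-off and shutdown behaviour is up to the
 * application):
 * @code
 * rd_kafka_error_t *error;
 *
 * while ((error = rd_kafka_init_transactions(producer, 30 * 1000))) {
 *         if (rd_kafka_error_is_retriable(error)) {
 *                 rd_kafka_error_destroy(error);
 *                 continue;  // e.g., after a short grace period
 *         }
 *         // Fatal or unexpected error: log and shut down the producer.
 *         fprintf(stderr, "init_transactions failed: %s\n",
 *                 rd_kafka_error_string(error));
 *         rd_kafka_error_destroy(error);
 *         break;
 * }
 * @endcode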
8119  */
8120 RD_EXPORT
8121 rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
8122 
8123 
8124 
8125 /**
8126  * @brief Begin a new transaction.
8127  *
8128  * rd_kafka_init_transactions() must have been called successfully (once)
8129  * before this function is called.
8130  *
8131  * Upon successful return from this function the application has to perform at
8132  * least one of the following operations within \c transaction.timeout.ms to
8133  * avoid timing out the transaction on the broker:
8134  *   * rd_kafka_produce() (et al.)
8135  *   * rd_kafka_send_offsets_to_transaction()
8136  *   * rd_kafka_commit_transaction()
8137  *   * rd_kafka_abort_transaction()
8138  *
8139  * Any messages produced, offsets sent (rd_kafka_send_offsets_to_transaction()),
8140  * etc., after the successful return of this function will be part of
8141  * the transaction and committed or aborted atomically.
8142  *
8143  * Finish the transaction by calling rd_kafka_commit_transaction() or
8144  * abort the transaction by calling rd_kafka_abort_transaction().
8145  *
8146  * @param rk Producer instance.
8147  *
8148  * @returns NULL on success or an error object on failure.
8149  *          Check whether a fatal error has been raised by
8150  *          calling rd_kafka_error_is_fatal().
8151  *          Error codes:
8152  *          RD_KAFKA_RESP_ERR__STATE if a transaction is already in progress
8153  *          or upon fatal error,
8154  *          RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
8155  *          configured for the producer instance,
8156  *          RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance.
8157  *          Other error codes not listed here may be returned, depending on
8158  *          broker version.
8159  *
8160  * @remark With the transactional producer, rd_kafka_produce(),
8161  *         rd_kafka_producev(), et al., are only allowed during an ongoing
8162  *         transaction, as started with this function.
8163  *         Any produce call outside an ongoing transaction, or for a failed
8164  *         transaction, will fail.
8165  *
8166  * @remark The returned error object (if not NULL) must be destroyed with
8167  *         rd_kafka_error_destroy().
8168  */
8169 RD_EXPORT
8170 rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
8171 
8172 
8173 /**
8174  * @brief Sends a list of topic partition offsets to the consumer group
8175  *        coordinator for \p cgmetadata, and marks the offsets as part
8176  *        of the current transaction.
8177  *        These offsets will be considered committed only if the transaction is
8178  *        committed successfully.
8179  *
8180  *        The offsets should be the next message your application will consume,
8181  *        i.e., the last processed message's offset + 1 for each partition.
8182  *        Either track the offsets manually during processing or use
8183  *        rd_kafka_position() (on the consumer) to get the current offsets for
8184  *        the partitions assigned to the consumer.
8185  *
8186  *        Use this method at the end of a consume-transform-produce loop prior
8187  *        to committing the transaction with rd_kafka_commit_transaction().
8188  *
8189  * @param rk Producer instance.
8190  * @param offsets List of offsets to commit to the consumer group upon
8191  *                successful commit of the transaction. Offsets should be
8192  *                the next message to consume, e.g., last processed message + 1.
8193  * @param cgmetadata The current consumer group metadata as returned by
8194  *                   rd_kafka_consumer_group_metadata() on the consumer
8195  *                   instance the provided offsets were consumed from.
8196  * @param timeout_ms Maximum time allowed to register the offsets on the broker.
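 *
 * A minimal sketch of gathering \p offsets and \p cgmetadata on the
 * consumer side (assuming \c consumer is the consumer instance the
 * messages were consumed from; error checks elided):
 * @code
 * rd_kafka_topic_partition_list_t *offsets;
 * rd_kafka_consumer_group_metadata_t *cgmd;
 *
 * rd_kafka_assignment(consumer, &offsets);  // currently assigned partitions
 * rd_kafka_position(consumer, offsets);     // next offsets to be consumed
 * cgmd = rd_kafka_consumer_group_metadata(consumer);
 *
 * rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, 10 * 1000);
 *
 * rd_kafka_consumer_group_metadata_destroy(cgmd);
 * rd_kafka_topic_partition_list_destroy(offsets);
 * @endcode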
8197  *
8198  * @remark This function must be called on the transactional producer instance,
8199  *         not the consumer.
8200  *
8201  * @remark The consumer must disable auto commits
8202  *         (set \c enable.auto.commit to false on the consumer).
8203  *
8204  * @remark Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in
8205  *         \p offsets will be ignored; if there are no valid offsets in
8206  *         \p offsets the function will return RD_KAFKA_RESP_ERR_NO_ERROR
8207  *         and no action will be taken.
8208  *
8209  * @returns NULL on success or an error object on failure.
8210  *          Check whether the returned error object permits retrying
8211  *          by calling rd_kafka_error_is_retriable(), or whether an abortable
8212  *          or fatal error has been raised by calling
8213  *          rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal()
8214  *          respectively.
8215  *          Error codes:
8216  *          RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
8217  *          RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
8218  *          transaction has been fenced by a newer producer instance,
8219  *          RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
8220  *          producer is no longer authorized to perform transactional
8221  *          operations,
8222  *          RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED if the producer is
8223  *          not authorized to write the consumer offsets to the group
8224  *          coordinator,
8225  *          RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
8226  *          configured for the producer instance,
8227  *          RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
8228  *          or if the \p cgmetadata or \p offsets are empty,
8229  *          RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous
8230  *          rd_kafka_send_offsets_to_transaction() call is still in progress.
8231  *          Other error codes not listed here may be returned, depending on
8232  *          broker version.
8233  *
8234  * @remark The returned error object (if not NULL) must be destroyed with
8235  *         rd_kafka_error_destroy().
8236  */
8237 RD_EXPORT
8238 rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(
8239     rd_kafka_t *rk,
8240     const rd_kafka_topic_partition_list_t *offsets,
8241     const rd_kafka_consumer_group_metadata_t *cgmetadata,
8242     int timeout_ms);
8243 
8244 
8245 /**
8246  * @brief Commit the current transaction (as started with
8247  *        rd_kafka_begin_transaction()).
8248  *
8249  *        Any outstanding messages will be flushed (delivered) before actually
8250  *        committing the transaction.
8251  *
8252  *        If any of the outstanding messages fail permanently the current
8253  *        transaction will enter the abortable error state and this
8254  *        function will return an abortable error; in this case the application
8255  *        must call rd_kafka_abort_transaction() before attempting a new
8256  *        transaction with rd_kafka_begin_transaction().
8257  *
8258  * @param rk Producer instance.
8259  * @param timeout_ms The maximum time to block. On timeout the operation
8260  *                   may continue in the background, depending on state,
8261  *                   and it is okay to call this function again.
8262  *                   Pass -1 to use the remaining transaction timeout;
8263  *                   this is the recommended use.
8264  *
8265  * @remark It is strongly recommended to always pass -1 (remaining transaction
8266  *         time) as the \p timeout_ms. Using other values risks internal
8267  *         state desynchronization in case any of the underlying protocol
8268  *         requests fail.
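 *
 * For example (a sketch only; full retriable/fatal handling as in the
 * error handling example above is omitted):
 * @code
 * rd_kafka_error_t *error = rd_kafka_commit_transaction(producer, -1);
 * if (error && rd_kafka_error_txn_requires_abort(error)) {
 *         rd_kafka_error_destroy(error);
 *         error = rd_kafka_abort_transaction(producer, -1);
 * }
 * if (error)
 *         rd_kafka_error_destroy(error);
 * @endcode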
8269  *
8270  * @remark This function will block until all outstanding messages are
8271  *         delivered and the transaction commit request has been successfully
8272  *         handled by the transaction coordinator, or until \p timeout_ms
8273  *         expires, whichever comes first. On timeout the application may
8274  *         call the function again.
8275  *
8276  * @remark Will automatically call rd_kafka_flush() to ensure all queued
8277  *         messages are delivered before attempting to commit the
8278  *         transaction.
8279  *         If the application has enabled RD_KAFKA_EVENT_DR it must
8280  *         serve the event queue in a separate thread since rd_kafka_flush()
8281  *         will not serve delivery reports in this mode.
8282  *
8283  * @returns NULL on success or an error object on failure.
8284  *          Check whether the returned error object permits retrying
8285  *          by calling rd_kafka_error_is_retriable(), or whether an abortable
8286  *          or fatal error has been raised by calling
8287  *          rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal()
8288  *          respectively.
8289  *          Error codes:
8290  *          RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
8291  *          RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be
8292  *          completely committed within \p timeout_ms, this is a retriable
8293  *          error as the commit continues in the background,
8294  *          RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
8295  *          transaction has been fenced by a newer producer instance,
8296  *          RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
8297  *          producer is no longer authorized to perform transactional
8298  *          operations,
8299  *          RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
8300  *          configured for the producer instance,
8301  *          RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance.
8302  *          Other error codes not listed here may be returned, depending on
8303  *          broker version.
8304  *
8305  * @remark The returned error object (if not NULL) must be destroyed with
8306  *         rd_kafka_error_destroy().
8307  */
8308 RD_EXPORT
8309 rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
8310 
8311 
8312 /**
8313  * @brief Aborts the ongoing transaction.
8314  *
8315  *        This function should also be used to recover from non-fatal abortable
8316  *        transaction errors.
8317  *
8318  *        Any outstanding messages will be purged and fail with
8319  *        RD_KAFKA_RESP_ERR__PURGE_INFLIGHT or RD_KAFKA_RESP_ERR__PURGE_QUEUE.
8320  *        See rd_kafka_purge() for details.
8321  *
8322  * @param rk Producer instance.
8323  * @param timeout_ms The maximum time to block. On timeout the operation
8324  *                   may continue in the background, depending on state,
8325  *                   and it is okay to call this function again.
8326  *                   Pass -1 to use the remaining transaction timeout;
8327  *                   this is the recommended use.
8328  *
8329  * @remark It is strongly recommended to always pass -1 (remaining transaction
8330  *         time) as the \p timeout_ms. Using other values risks internal
8331  *         state desynchronization in case any of the underlying protocol
8332  *         requests fail.
8333  *
8334  * @remark This function will block until all outstanding messages are purged
8335  *         and the transaction abort request has been successfully
8336  *         handled by the transaction coordinator, or until \p timeout_ms
8337  *         expires, whichever comes first. On timeout the application may
8338  *         call the function again.
8339  *         If the application has enabled RD_KAFKA_EVENT_DR it must
8340  *         serve the event queue in a separate thread since rd_kafka_flush()
8341  *         will not serve delivery reports in this mode.
8342  *
8343  *
8344  * @returns NULL on success or an error object on failure.
8345  *          Check whether the returned error object permits retrying
8346  *          by calling rd_kafka_error_is_retriable(), or whether a fatal error
8347  *          has been raised by calling rd_kafka_error_is_fatal().
8348  *          Error codes:
8349  *          RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
8350  *          RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be
8351  *          completely aborted within \p timeout_ms, this is a retriable
8352  *          error as the abort continues in the background,
8353  *          RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
8354  *          transaction has been fenced by a newer producer instance,
8355  *          RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
8356  *          producer is no longer authorized to perform transactional
8357  *          operations,
8358  *          RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
8359  *          configured for the producer instance,
8360  *          RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance.
8361  *          Other error codes not listed here may be returned, depending on
8362  *          broker version.
8363  *
8364  * @remark The returned error object (if not NULL) must be destroyed with
8365  *         rd_kafka_error_destroy().
8366  */
8367 RD_EXPORT
8368 rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
8369 
8370 
8371 /**@}*/
8372 
8373 /* @cond NO_DOC */
8374 #ifdef __cplusplus
8375 }
8376 #endif
8377 #endif /* _RDKAFKA_H_ */
8378 /* @endcond NO_DOC */