diff --git a/README.md b/README.md index 5aa705f3..f76f9bfb 100644 --- a/README.md +++ b/README.md @@ -24,13 +24,13 @@ It supports the same interfaces as the [PHP RdKafka extension](https://github.co ## Runtime Requirements * PHP ^7.4 or ^8.0 with extensions FFI enabled -* librdkafka ^1.0.0 +* librdkafka ^1.0.0 or ^2.0.0 * Conflicts: RdKafka extension * Suggested: * zend opcache extension for preloading * pcntl extension for faster shutdown in request/response context -Note: Support for macOS and Windows is currently experimental. +Note: Support for macOS and Windows is experimental. ## Installation diff --git a/resources/ffigen/LibrdkafkaDocumentation.php b/resources/ffigen/LibrdkafkaDocumentation.php index ebce03f4..7adea4c5 100644 --- a/resources/ffigen/LibrdkafkaDocumentation.php +++ b/resources/ffigen/LibrdkafkaDocumentation.php @@ -14,7 +14,7 @@ class LibrdkafkaDocumentation protected array $documentedElements; private string $url; - public function __construct(string $url = 'https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html') + public function __construct(string $url = 'https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html') { $this->url = $url; } @@ -23,19 +23,20 @@ public function extract(): void { echo 'Download and prepare librdkafka documentation ...'; - $page = new Crawler(file_get_contents($this->url)); - $elements = $page->filter('a[class=anchor]')->each( + $html = file_get_contents($this->url); + $page = new Crawler($html); + $elements = $page->filter('div[class=contents] a[id]')->each( function (Crawler $node, $i) { // extract defines & methods if ($node->nextAll()->count() - && $node->nextAll()->first()->filter('td[class=memname]')->count() - && $node->nextAll()->first()->filter('div[class=memdoc]')->count() + && $node->nextAll()->eq(1)->filter('td[class=memname]')->count() + && $node->nextAll()->eq(1)->filter('div[class=memdoc]')->count() ) { // extract enumerators - if 
($node->nextAll()->first()->filter('td[class=fieldname]')->count() - && $node->nextAll()->first()->filter('td[class=fielddoc]')->count() + if ($node->nextAll()->eq(1)->filter('td[class=fieldname]')->count() + && $node->nextAll()->eq(1)->filter('td[class=fielddoc]')->count() ) { - $enums = $node->nextAll()->first()->filter('td[class=fieldname]')->each( + $enums = $node->nextAll()->eq(1)->filter('td[class=fieldname]')->each( function (Crawler $node, $i) { return [ 'name' => $this->filterHtml($node->text()), @@ -46,8 +47,8 @@ function (Crawler $node, $i) { } // extract params - and remove them (from description) - if ($node->nextAll()->first()->filter('td[class=paramname]')->count()) { - $params = $node->nextAll()->first()->filter('td[class=paramname]')->each( + if ($node->nextAll()->eq(1)->filter('td[class=paramname]')->count()) { + $params = $node->nextAll()->eq(1)->filter('td[class=paramname]')->each( function (Crawler $node, $i) { return [ 'name' => $this->filterHtml($node->text()), @@ -55,7 +56,7 @@ function (Crawler $node, $i) { ]; } ); - $node->nextAll()->first()->filter('dl[class=params]')->each( + $node->nextAll()->eq(1)->filter('dl[class=params]')->each( function (Crawler $node, $i): void { $node->getNode(0)->parentNode->removeChild($node->getNode(0)); } @@ -63,9 +64,9 @@ function (Crawler $node, $i): void { } // extract return - and remove them (from description) - if ($node->nextAll()->first()->filter('dl[class*=return] dd')->count()) { - $return = $this->filterHtml($node->nextAll()->first()->filter('dl[class*=return] dd')->first()->html('')); - $node->nextAll()->first()->filter('dl[class*=return]')->each( + if ($node->nextAll()->eq(1)->filter('dl[class*=return] dd')->count()) { + $return = $this->filterHtml($node->nextAll()->eq(1)->filter('dl[class*=return] dd')->first()->html('')); + $node->nextAll()->eq(1)->filter('dl[class*=return]')->each( function (Crawler $node, $i): void { $node->getNode(0)->parentNode->removeChild($node->getNode(0)); } @@ -74,11 
+75,11 @@ function (Crawler $node, $i): void { return [ 'id' => $node->attr('id'), - 'name' => $node->nextAll()->first()->filter('td[class=memname]')->count() - ? $this->filterHtml($node->nextAll()->first()->filter('td[class=memname]')->first()->html('')) + 'name' => $node->nextAll()->eq(1)->filter('td[class=memname]')->count() + ? $this->filterHtml($node->nextAll()->eq(1)->filter('td[class=memname]')->first()->html('')) : '', - 'description' => $node->nextAll()->first()->filter('div[class=memdoc]')->count() - ? $this->filterHtml($node->nextAll()->first()->filter('div[class=memdoc]')->first()->html('')) + 'description' => $node->nextAll()->eq(1)->filter('div[class=memdoc]')->count() + ? $this->filterHtml($node->nextAll()->eq(1)->filter('div[class=memdoc]')->first()->html('')) : '', 'params' => $params ?? [], 'return' => $return ?? null, diff --git a/resources/ffigen/LibrdkafkaHeaderFiles.php b/resources/ffigen/LibrdkafkaHeaderFiles.php index fa8fef1e..43da717e 100644 --- a/resources/ffigen/LibrdkafkaHeaderFiles.php +++ b/resources/ffigen/LibrdkafkaHeaderFiles.php @@ -44,7 +44,7 @@ public function prepareVersion(string $version): void $content = @file_get_contents($this->config->getOutputPath() . '/' . $version . '-' . $fileName); } else { $url = $baseUrl . '/' . $fileName; - echo " Download ${url}" . PHP_EOL; + echo " Download {$url}" . PHP_EOL; $content = @file_get_contents($url); if ($content === false) { @@ -57,7 +57,7 @@ public function prepareVersion(string $version): void $content = $this->prepareFileContent($file, $content, $version); $this->filesystem->dumpFile($file, $content); - echo " Save as ${file}" . PHP_EOL; + echo " Save as {$file}" . PHP_EOL; } } diff --git a/src/RdKafka/FFI/Methods.php b/src/RdKafka/FFI/Methods.php index 26bd8134..1222681c 100644 --- a/src/RdKafka/FFI/Methods.php +++ b/src/RdKafka/FFI/Methods.php @@ -23,7 +23,7 @@ abstract public static function getFFI():\FFI; * Use rd_kafka_version_str() to retreive the version as a string. 
* @return int|null int - ) - Version integer. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a83e363606ef2da2e91b7429b229dbc8e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a83e363606ef2da2e91b7429b229dbc8e */ public static function rd_kafka_version(): ?int { @@ -34,7 +34,7 @@ public static function rd_kafka_version(): ?int *

Returns the librdkafka version as string.

* @return string|null const char* - ) - Version string * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a0cc60434083686fd8e379a905652d34a + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a0cc60434083686fd8e379a905652d34a */ public static function rd_kafka_version_str(): ?string { @@ -45,7 +45,7 @@ public static function rd_kafka_version_str(): ?string *

Retrieve supported debug contexts for use with the "debug" configuration property. (runtime)

* @return string|null const char* - ) - Comma-separated list of available debugging contexts. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#adece97d3cbdd6ca936df5b0663118c45 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#adece97d3cbdd6ca936df5b0663118c45 */ public static function rd_kafka_get_debug_contexts(): ?string { @@ -67,7 +67,7 @@ public static function rd_kafka_get_err_descs(?\FFI\CData $errdescs, ?\FFI\CData * @param int $err rd_kafka_resp_err_t - ) - Error code to translate * @return string|null const char* * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ab7bfc925e8d63851511b88a1cee94d6d + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab7bfc925e8d63851511b88a1cee94d6d */ public static function rd_kafka_err2str(int $err): ?string { @@ -79,7 +79,7 @@ public static function rd_kafka_err2str(int $err): ?string * @param int $err rd_kafka_resp_err_t - ) - Error code to translate * @return string|null const char* * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a8d5f6f2775ec67b124abeb5dfada2d77 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8d5f6f2775ec67b124abeb5dfada2d77 */ public static function rd_kafka_err2name(int $err): ?string { @@ -99,10 +99,12 @@ public static function rd_kafka_err2name(int $err): ?string *
  • rd_kafka_produce()
  • * *

    The main use for this function is to avoid converting system errno values to rd_kafka_resp_err_t codes for legacy APIs.

    - *
    Remarks
    The last error is stored per-thread, if multiple rd_kafka_t handles are used in the same application thread the developer needs to make sure rd_kafka_last_error() is called immediately after a failed API call.
    + *
    Remarks
    The last error is stored per-thread, if multiple rd_kafka_t handles are used in the same application thread the developer needs to make sure rd_kafka_last_error() is called immediately after a failed API call.
    + *
    + * errno propagation from librdkafka is not safe on Windows and should not be used, use rd_kafka_last_error() instead.
    * @return int rd_kafka_resp_err_t - ) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ae7b90c323d460e0276d79f6ab69e93b7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae7b90c323d460e0276d79f6ab69e93b7 */ public static function rd_kafka_last_error(): int { @@ -124,11 +126,12 @@ public static function rd_kafka_last_error(): int * * *
    Remarks
    A better alternative is to call rd_kafka_last_error() immediately after any of the above functions return -1 or NULL.
    + *
    Deprecated:
    Use rd_kafka_last_error() to retrieve the last error code set by the legacy librdkafka APIs.
    *
    See also
    rd_kafka_last_error()
    * @param int|null $errnox int - ) - System errno value to convert * @return int rd_kafka_resp_err_t - Appropriate error code for errnox * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a99c7d1faaa534befeedf23b55a88a40f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abeabf8589c657d7a3ec5a1411a7de91d */ public static function rd_kafka_errno2err(?int $errnox): int { @@ -137,11 +140,12 @@ public static function rd_kafka_errno2err(?int $errnox): int /** *

    Returns the thread-local system errno.

    - *

    On most platforms this is the same as errno but in case of different runtimes between library and application (e.g., Windows static DLLs) this provides a means for expsing the errno librdkafka uses.

    - *
    Remarks
    The value is local to the current calling thread.
    + *

    On most platforms this is the same as errno but in case of different runtimes between library and application (e.g., Windows static DLLs) this provides a means for exposing the errno librdkafka uses.

    + *
    Remarks
    The value is local to the current calling thread.
    + *
    Deprecated:
    Use rd_kafka_last_error() to retrieve the last error code set by the legacy librdkafka APIs.
    * @return int|null int - ) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aef27224d8c638e51f3ee29bb25f65f1f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8aa1b1a41d2fc0c487f519663609df4f */ public static function rd_kafka_errno(): ?int { @@ -149,11 +153,16 @@ public static function rd_kafka_errno(): ?int } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $errstr char* + *

    Returns the first fatal error set on this client instance, or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred.

    + *

    This function is to be used with the Idempotent Producer and error_cb to detect fatal errors.

    + *

    Generally all errors raised by error_cb are to be considered informational and temporary, the client will try to recover from all errors in a graceful fashion (by retrying, etc).

    + *

    However, some errors should logically be considered fatal to retain consistency; in particular a set of errors that may occur when using the Idempotent Producer and the in-order or exactly-once producer guarantees can't be satisfied.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written to if there is a fatal error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else any other error code. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a44c976534da6f3877cc514826c71607c */ public static function rd_kafka_fatal_error(?\FFI\CData $rk, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -161,11 +170,14 @@ public static function rd_kafka_fatal_error(?\FFI\CData $rk, ?\FFI\CData $errstr } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int $err rd_kafka_resp_err_t - * @param string|null $reason const char* - * @return int rd_kafka_resp_err_t + *

    Trigger a fatal error for testing purposes.

    + *

    Since there is no practical way to trigger real fatal errors in the idempotent producer, this method allows an application to trigger fabricated fatal errors in tests to check its error handling code.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param int $err rd_kafka_resp_err_t - The underlying error code. + * @param string|null $reason const char* - A human readable error reason. Will be prefixed with "test_fatal_error: " to differentiate from real fatal errors. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error has already been triggered. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a2e603ee9942480165be3c48f006a7171 */ public static function rd_kafka_test_fatal_error(?\FFI\CData $rk, int $err, ?string $reason): int { @@ -177,7 +189,7 @@ public static function rd_kafka_test_fatal_error(?\FFI\CData $rk, int $err, ?str *
    Remarks
    This must not be called for elements in a topic partition list.
    * @param \FFI\CData|null $rktpar rd_kafka_topic_partition_t* - ) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ac5a7b02e3af816cfacbcfa6468c40c9a + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac5a7b02e3af816cfacbcfa6468c40c9a */ public static function rd_kafka_topic_partition_destroy(?\FFI\CData $rktpar): void { @@ -193,7 +205,7 @@ public static function rd_kafka_topic_partition_destroy(?\FFI\CData $rktpar): vo * @param int|null $size int - ) - Initial allocated size used when the expected number of elements is known or can be estimated. Avoids reallocation and possibly relocation of the elems array. * @return \FFI\CData|null rd_kafka_topic_partition_list_t* - A newly allocated Topic+Partition list. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#afb87d24333b6ad5a7415b06882f06b2a + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afb87d24333b6ad5a7415b06882f06b2a */ public static function rd_kafka_topic_partition_list_new(?int $size): ?\FFI\CData { @@ -216,7 +228,7 @@ public static function rd_kafka_topic_partition_list_destroy(?\FFI\CData $rkparl * @param int|null $partition int32_t - Partition id * @return \FFI\CData|null rd_kafka_topic_partition_t* - The object which can be used to fill in additionals fields. 
* @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a123ce30e08b31d4ff0fcf6ebe876173d + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a123ce30e08b31d4ff0fcf6ebe876173d */ public static function rd_kafka_topic_partition_list_add(?\FFI\CData $rktparlist, ?string $topic, ?int $partition): ?\FFI\CData { @@ -230,7 +242,7 @@ public static function rd_kafka_topic_partition_list_add(?\FFI\CData $rktparlist * @param int|null $start int32_t - Start partition of range * @param int|null $stop int32_t - Last partition of range (inclusive) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a6365695de425e7866ddd0c59d704111b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6365695de425e7866ddd0c59d704111b */ public static function rd_kafka_topic_partition_list_add_range(?\FFI\CData $rktparlist, ?string $topic, ?int $start, ?int $stop): void { @@ -247,7 +259,7 @@ public static function rd_kafka_topic_partition_list_add_range(?\FFI\CData $rktp * @param int|null $partition int32_t - Partition to match * @return int|null int - 1 if partition was found (and removed), else 0. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a94a8195aa5f0195d020494bced858a97 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a94a8195aa5f0195d020494bced858a97 */ public static function rd_kafka_topic_partition_list_del(?\FFI\CData $rktparlist, ?string $topic, ?int $partition): ?int { @@ -262,7 +274,7 @@ public static function rd_kafka_topic_partition_list_del(?\FFI\CData $rktparlist * @param int|null $idx int * @return int|null int - 1 if partition was found (and removed), else 0. 
* @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a8476ebf3c2f54ddee53e0863feb85463 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8476ebf3c2f54ddee53e0863feb85463 */ public static function rd_kafka_topic_partition_list_del_by_idx(?\FFI\CData $rktparlist, ?int $idx): ?int { @@ -274,7 +286,7 @@ public static function rd_kafka_topic_partition_list_del_by_idx(?\FFI\CData $rkt * @param \FFI\CData|null $src const rd_kafka_topic_partition_list_t* - ) - The existing list to copy. * @return \FFI\CData|null rd_kafka_topic_partition_list_t* - A new list fully populated to be identical to src * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a66fd3f8c00ffbd0ea740a638dd0a95f7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a66fd3f8c00ffbd0ea740a638dd0a95f7 */ public static function rd_kafka_topic_partition_list_copy(?\FFI\CData $src): ?\FFI\CData { @@ -289,7 +301,7 @@ public static function rd_kafka_topic_partition_list_copy(?\FFI\CData $src): ?\F * @param int|null $offset int64_t * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if partition was not found in the list. 
* @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a102b340b901babb247d2c0a8580a094d + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a102b340b901babb247d2c0a8580a094d */ public static function rd_kafka_topic_partition_list_set_offset(?\FFI\CData $rktparlist, ?string $topic, ?int $partition, ?int $offset): int { @@ -303,7 +315,7 @@ public static function rd_kafka_topic_partition_list_set_offset(?\FFI\CData $rkt * @param int|null $partition int32_t * @return \FFI\CData|null rd_kafka_topic_partition_t* - a pointer to the first matching element, or NULL if not found. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ab25d8e4e58c891bdc533471c210697fa + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a122c284913412e8e043fe7b51aebc0d7 */ public static function rd_kafka_topic_partition_list_find(?\FFI\CData $rktparlist, ?string $topic, ?int $partition): ?\FFI\CData { @@ -312,12 +324,13 @@ public static function rd_kafka_topic_partition_list_find(?\FFI\CData $rktparlis /** *

    Sort list using comparator cmp.

    - *

    If cmp is NULL the default comparator will be used that sorts by ascending topic name and partition.

    + *

    If cmp is NULL the default comparator will be used that sorts by ascending topic name and partition.

    + *

    cmp_opaque is provided as the cmp_opaque argument to cmp.

    * @param \FFI\CData|null $rktparlist rd_kafka_topic_partition_list_t* * @param \FFI\CData|\Closure $cmp int(*)(const void*, const void*, void*) * @param \FFI\CData|object|string|null $opaque void* * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ab72ed92794eabf2e7ba1b7be9c94de1f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af56e41c6cb82d6ded5cd973976cd62df */ public static function rd_kafka_topic_partition_list_sort(?\FFI\CData $rktparlist, $cmp, $opaque): void { @@ -325,9 +338,11 @@ public static function rd_kafka_topic_partition_list_sort(?\FFI\CData $rktparlis } /** - * @param int|null $initial_count size_t + *

    Create a new headers list.

    + * @param int|null $initial_count size_t - ) - Preallocate space for this number of headers. Any number of headers may be added, updated and removed regardless of the initial count. * @return \FFI\CData|null rd_kafka_headers_t* * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9fdd55ee90816f38324f5d0af1b4fc1f */ public static function rd_kafka_headers_new(?int $initial_count): ?\FFI\CData { @@ -354,13 +369,15 @@ public static function rd_kafka_headers_copy(?\FFI\CData $src): ?\FFI\CData } /** - * @param \FFI\CData|null $hdrs rd_kafka_headers_t* - * @param string|null $name const char* - * @param int|null $name_size ssize_t - * @param \FFI\CData|object|string|null $value const void* - * @param int|null $value_size ssize_t - * @return int rd_kafka_resp_err_t + *

    Add header with name name and value val (copied) of size size (not including null-terminator).

    + * @param \FFI\CData|null $hdrs rd_kafka_headers_t* - Headers list. + * @param string|null $name const char* - Header name. + * @param int|null $name_size ssize_t - Header name size (not including the null-terminator). If -1 the name length is automatically acquired using strlen(). + * @param \FFI\CData|object|string|null $value const void* - Pointer to header value, or NULL (set size to 0 or -1). + * @param int|null $value_size ssize_t - Size of header value. If -1 the value is assumed to be a null-terminated string and the length is automatically acquired using strlen(). + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, else RD_KAFKA_RESP_ERR_NO_ERROR. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad86e25f9787e647588fc2067705de10e */ public static function rd_kafka_header_add(?\FFI\CData $hdrs, ?string $name, ?int $name_size, $value, ?int $value_size): int { @@ -368,10 +385,12 @@ public static function rd_kafka_header_add(?\FFI\CData $hdrs, ?string $name, ?in } /** + *

    Remove all headers for the given key (if any).

    * @param \FFI\CData|null $hdrs rd_kafka_headers_t* * @param string|null $name const char* - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8a8d23d274cf233b6b4b0a345c0180c1 */ public static function rd_kafka_header_remove(?\FFI\CData $hdrs, ?string $name): int { @@ -379,12 +398,19 @@ public static function rd_kafka_header_remove(?\FFI\CData $hdrs, ?string $name): } /** - * @param \FFI\CData|null $hdrs const rd_kafka_headers_t* - * @param string|null $name const char* - * @param \FFI\CData|object|string|null $valuep const void** - * @param \FFI\CData|null $sizep size_t* - * @return int rd_kafka_resp_err_t + *

    Find last header in list hdrs matching name.

    + * + * + *
    Remarks
    The returned pointer in valuep includes a trailing null-terminator that is not accounted for in sizep.
    + *
    + * The returned pointer is only valid as long as the headers list and the header item is valid.
    + * @param \FFI\CData|null $hdrs const rd_kafka_headers_t* - Headers list. + * @param string|null $name const char* - Header to find (last match). + * @param \FFI\CData|object|string|null $valuep const void** - (out) Set to a (null-terminated) const pointer to the value (may be NULL). + * @param \FFI\CData|null $sizep size_t* - (out) Set to the value's size (not including null-terminator). + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else RD_KAFKA_RESP_ERR__NOENT. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6cb8628d390cd89a6d9daada8c70161a */ public static function rd_kafka_header_get_last(?\FFI\CData $hdrs, ?string $name, $valuep, ?\FFI\CData $sizep): int { @@ -392,13 +418,17 @@ public static function rd_kafka_header_get_last(?\FFI\CData $hdrs, ?string $name } /** - * @param \FFI\CData|null $hdrs const rd_kafka_headers_t* - * @param int|null $idx size_t - * @param string|null $name const char* - * @param \FFI\CData|object|string|null $valuep const void** - * @param \FFI\CData|null $sizep size_t* + *

    Iterator for headers matching name.

    + *
       Same semantics as rd_kafka_header_get_last()
    +     * 
    + * @param \FFI\CData|null $hdrs const rd_kafka_headers_t* - Headers to iterate. + * @param int|null $idx size_t - Iterator index, start at 0 and increment by one for each call as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned. + * @param string|null $name const char* - Header name to match. + * @param \FFI\CData|object|string|null $valuep const void** - (out) Set to a (null-terminated) const pointer to the value (may be NULL). + * @param \FFI\CData|null $sizep size_t* - (out) Set to the value's size (not including null-terminator). * @return int rd_kafka_resp_err_t * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5cf2d5ce4869203aaa9e18ad2c763bbf */ public static function rd_kafka_header_get(?\FFI\CData $hdrs, ?int $idx, ?string $name, $valuep, ?\FFI\CData $sizep): int { @@ -406,6 +436,9 @@ public static function rd_kafka_header_get(?\FFI\CData $hdrs, ?int $idx, ?string } /** + *

    Iterator for all headers.

    + *
       Same semantics as rd_kafka_header_get()
    +     * 
    See also
    rd_kafka_header_get()
    * @param \FFI\CData|null $hdrs const rd_kafka_headers_t* * @param int|null $idx size_t * @param \FFI\CData|null $namep const char** @@ -413,6 +446,7 @@ public static function rd_kafka_header_get(?\FFI\CData $hdrs, ?int $idx, ?string * @param \FFI\CData|null $sizep size_t* * @return int rd_kafka_resp_err_t * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3a39136e1d13591f668158c0fa20961b */ public static function rd_kafka_header_get_all(?\FFI\CData $hdrs, ?int $idx, ?\FFI\CData $namep, $valuep, ?\FFI\CData $sizep): int { @@ -438,7 +472,7 @@ public static function rd_kafka_message_destroy(?\FFI\CData $rkmessage): void * @param \FFI\CData|null $tstype rd_kafka_timestamp_type_t* * @return int|null int64_t - message timestamp, or -1 if not available. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a4371deba4afe6941cc5f9e80df5ca3e7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a4371deba4afe6941cc5f9e80df5ca3e7 */ public static function rd_kafka_message_timestamp(?\FFI\CData $rkmessage, ?\FFI\CData $tstype): ?int { @@ -446,9 +480,11 @@ public static function rd_kafka_message_timestamp(?\FFI\CData $rkmessage, ?\FFI\ } /** - * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* - * @return int|null int64_t + *

    Returns the latency for a produced message measured from the produce() call.

    + * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* - ) + * @return int|null int64_t - the latency in microseconds, or -1 if not available. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aba20435b42efef7a3c38dee3fb58c27b */ public static function rd_kafka_message_latency(?\FFI\CData $rkmessage): ?int { @@ -456,10 +492,17 @@ public static function rd_kafka_message_latency(?\FFI\CData $rkmessage): ?int } /** + *

    Get the message header list.

    + *

    The returned pointer in *hdrsp is associated with the rkmessage and must not be used after destruction of the message object or the header list is replaced with rd_kafka_message_set_headers().

    + * + *
    Remarks
    Headers require broker version 0.11.0.0 or later.
    + *
    + * As an optimization the raw protocol headers are parsed on the first call to this function.
    * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* * @param \FFI\CData|null $hdrsp rd_kafka_headers_t** - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned, RD_KAFKA_RESP_ERR__NOENT if the message has no headers, or another error code if the headers could not be parsed. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aaccd9ee7e1c73b89d86ed41db3b86e68 */ public static function rd_kafka_message_headers(?\FFI\CData $rkmessage, ?\FFI\CData $hdrsp): int { @@ -467,10 +510,14 @@ public static function rd_kafka_message_headers(?\FFI\CData $rkmessage, ?\FFI\CD } /** + *

    Get the message header list and detach the list from the message making the application the owner of the headers. The application must eventually destroy the headers using rd_kafka_headers_destroy(). The message's headers will be set to NULL.

    + *

    Otherwise same semantics as rd_kafka_message_headers()

    + *
    See also
    rd_kafka_message_headers
    * @param \FFI\CData|null $rkmessage rd_kafka_message_t* * @param \FFI\CData|null $hdrsp rd_kafka_headers_t** * @return int rd_kafka_resp_err_t * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af89ef912d3c1ff66ea612fda27c0a643 */ public static function rd_kafka_message_detach_headers(?\FFI\CData $rkmessage, ?\FFI\CData $hdrsp): int { @@ -478,9 +525,13 @@ public static function rd_kafka_message_detach_headers(?\FFI\CData $rkmessage, ? } /** - * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - * @param \FFI\CData|null $hdrs rd_kafka_headers_t* + *

    Replace the message's current headers with a new list.

    + * + *
    Remarks
    The existing headers object, if any, will be destroyed.
    + * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - The message to set headers. + * @param \FFI\CData|null $hdrs rd_kafka_headers_t* - New header list. The message object assumes ownership of the list, the list will be destroyed automatically with the message object. The new headers list may be updated until the message object is passed or returned to librdkafka. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a99e46233e57fc4f5783d8ae374584bbe */ public static function rd_kafka_message_set_headers(?\FFI\CData $rkmessage, ?\FFI\CData $hdrs): void { @@ -488,9 +539,11 @@ public static function rd_kafka_message_set_headers(?\FFI\CData $rkmessage, ?\FF } /** - * @param \FFI\CData|null $hdrs const rd_kafka_headers_t* + *

    Returns the number of header key/value pairs.

    + * @param \FFI\CData|null $hdrs const rd_kafka_headers_t* - ) - Headers to count * @return int|null size_t * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af8ce5228eef3ea0a9209965902b6fcdd */ public static function rd_kafka_header_cnt(?\FFI\CData $hdrs): ?int { @@ -498,9 +551,12 @@ public static function rd_kafka_header_cnt(?\FFI\CData $hdrs): ?int } /** - * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* + *

    Returns the message's persistence status in the topic log.

    + *
    Remarks
    The message status is not available in on_acknowledgement interceptors.
    + * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* - ) * @return int rd_kafka_msg_status_t * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af259b9c4216b74d4f750d93ff6396ce5 */ public static function rd_kafka_message_status(?\FFI\CData $rkmessage): int { @@ -509,23 +565,29 @@ public static function rd_kafka_message_status(?\FFI\CData $rkmessage): int /** *

    Create configuration object.

    - *

    When providing your own configuration to the rd_kafka_*_new_*() calls the rd_kafka_conf_t objects needs to be created with this function which will set up the defaults. I.e.:

    1 rd_kafka_conf_t *myconf;
    - *
    2 rd_kafka_conf_res_t res;
    - *
    3 
    - *
    4 myconf = rd_kafka_conf_new();
    - *
    5 res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
    - *
    6  errstr, sizeof(errstr));
    - *
    7 if (res != RD_KAFKA_CONF_OK)
    - *
    8  die("%s\n", errstr);
    - *
    9 
    - *
    10 rk = rd_kafka_new(..., myconf);
    + *

    When providing your own configuration to the rd_kafka_*_new_*() calls the rd_kafka_conf_t objects needs to be created with this function which will set up the defaults. I.e.:

    rd_kafka_conf_t *myconf;
    + *
    rd_kafka_conf_res_t res;
    + *
    + *
    myconf = rd_kafka_conf_new();
    + *
    res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
    + *
    errstr, sizeof(errstr));
    + *
    if (res != RD_KAFKA_CONF_OK)
    + *
    die("%s\n", errstr);
    + *
    + *
    rk = rd_kafka_new(..., myconf);
    + *
    rd_kafka_new
    RD_EXPORT rd_kafka_t * rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size)
    Creates a new Kafka handle and starts its operation according to the specified type (RD_KAFKA_CONSUME...
    + *
    rd_kafka_conf_new
    RD_EXPORT rd_kafka_conf_t * rd_kafka_conf_new(void)
    Create configuration object.
    + *
    rd_kafka_conf_set
    RD_EXPORT rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size)
    Sets a configuration property.
    + *
    rd_kafka_conf_res_t
    rd_kafka_conf_res_t
    Configuration result type.
    Definition: rdkafka.h:1603
    + *
    RD_KAFKA_CONF_OK
    @ RD_KAFKA_CONF_OK
    Definition: rdkafka.h:1608
    *

    Please see CONFIGURATION.md for the default settings or use rd_kafka_conf_properties_show() to provide the information at runtime.

    *

    The properties are identical to the Apache Kafka configuration properties whenever possible.

    + *
    Remarks
    A successful call to rd_kafka_new() will assume ownership of the conf object and rd_kafka_conf_destroy() must not be called.
    * - *
    See also
    rd_kafka_conf_set(), rd_kafka_conf_destroy()
    + *
    See also
    rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy()
    * @return \FFI\CData|null rd_kafka_conf_t* - ) - A new rd_kafka_conf_t object with defaults set. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa7459bd22e8cfa81aa8c2480a4a0304c + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa7459bd22e8cfa81aa8c2480a4a0304c */ public static function rd_kafka_conf_new(): ?\FFI\CData { @@ -542,9 +604,13 @@ public static function rd_kafka_conf_destroy(?\FFI\CData $conf): void } /** - * @param \FFI\CData|null $conf const rd_kafka_conf_t* + *

    Creates a copy/duplicate of configuration object conf.

    + *
    Remarks
    Interceptors are NOT copied to the new configuration object.
    + *
    See also
    rd_kafka_interceptor_f_on_conf_dup
    + * @param \FFI\CData|null $conf const rd_kafka_conf_t* - ) * @return \FFI\CData|null rd_kafka_conf_t* * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8cbfe15c6978ff09870e82cb524c673d */ public static function rd_kafka_conf_dup(?\FFI\CData $conf): ?\FFI\CData { @@ -567,6 +633,8 @@ public static function rd_kafka_conf_dup_filter(?\FFI\CData $conf, ?int $filter_ *

    Sets a configuration property.

    *

    conf must have been previously created with rd_kafka_conf_new().

    *

    Fallthrough: Topic-level configuration properties may be set using this interface in which case they are applied on the default_topic_conf. If no default_topic_conf has been set one will be created. Any sub-sequent rd_kafka_conf_set_default_topic_conf() calls will replace the current default topic configuration.

    + * + *
    Remarks
    Setting properties or values that were disabled at build time due to missing dependencies will return RD_KAFKA_CONF_INVALID.
    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param string|null $name const char* * @param string|null $value const char* @@ -574,7 +642,7 @@ public static function rd_kafka_conf_dup_filter(?\FFI\CData $conf, ?int $filter_ * @param int|null $errstr_size size_t * @return int rd_kafka_conf_res_t - rd_kafka_conf_res_t to indicate success or failure. In case of failure errstr is updated to contain a human readable error string. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#abb1b319278333e8cdee9442da7f135e8 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abb1b319278333e8cdee9442da7f135e8 */ public static function rd_kafka_conf_set(?\FFI\CData $conf, ?string $name, ?string $value, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -592,9 +660,24 @@ public static function rd_kafka_conf_set_events(?\FFI\CData $conf, ?int $events) } /** + *

    Generic event callback to be used with the event API to trigger callbacks for rd_kafka_event_t objects from a background thread serving the background queue.

    + *

    How to use:

      + *
1. First set the event callback on the configuration object with this function, followed by creating an rd_kafka_t instance with rd_kafka_new().
+ *
2. Get the instance's background queue with rd_kafka_queue_get_background() and pass it as the reply/response queue to an API that takes an event queue, such as rd_kafka_CreateTopics().
+ *
3. As the response event is ready and enqueued on the background queue the event callback will be triggered from the background thread.
+ *
4. Prior to destroying the client instance, lose your reference to the background queue by calling rd_kafka_queue_destroy().
+ *
    + *

    The application must destroy the rkev passed to event cb using rd_kafka_event_destroy().

    + *

    The event_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    + *
    Remarks
    This callback is a specialized alternative to the poll-based event API described in the Event interface section.
    + *
    + * The event_cb will be called spontaneously from a background thread completely managed by librdkafka. Take care to perform proper locking of application objects.
    + *
    Warning
    The application MUST NOT call rd_kafka_destroy() from the event callback.
    + *
    See also
    rd_kafka_queue_get_background
    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $event_cb void(*)(rd_kafka_t*, rd_kafka_event_t*, void*) * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5ce6c329ca692674b1c42460f9bab521 */ public static function rd_kafka_conf_set_background_event_cb(?\FFI\CData $conf, $event_cb): void { @@ -606,7 +689,7 @@ public static function rd_kafka_conf_set_background_event_cb(?\FFI\CData $conf, * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $dr_cb void(*)(rd_kafka_t*, void*, size_t, rd_kafka_resp_err_t, void*, void*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a075b15c4141784fdc271de217005a41f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a075b15c4141784fdc271de217005a41f */ public static function rd_kafka_conf_set_dr_cb(?\FFI\CData $conf, $dr_cb): void { @@ -616,12 +699,15 @@ public static function rd_kafka_conf_set_dr_cb(?\FFI\CData $conf, $dr_cb): void /** *

    Producer: Set delivery report callback in provided conf object.

    *

    The delivery report callback will be called once for each message accepted by rd_kafka_produce() (et.al) with err set to indicate the result of the produce request.

    - *

    The callback is called when a message is succesfully produced or if librdkafka encountered a permanent failure, or the retry counter for temporary errors has been exhausted.

    - *

    An application must call rd_kafka_poll() at regular intervals to serve queued delivery report callbacks.

    + *

The callback is called when a message is successfully produced or if librdkafka encountered a permanent failure. Delivery errors occur when the retry count is exceeded, when the message.timeout.ms timeout is exceeded or there is a permanent error like RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART.

    + *

    An application must call rd_kafka_poll() at regular intervals to serve queued delivery report callbacks.

    + *

    The broker-assigned offset can be retrieved with rkmessage->offset and the timestamp can be retrieved using rd_kafka_message_timestamp().

    + *

    The dr_msg_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque(). The per-message msg_opaque value is available in rd_kafka_message_t._private.

    + *
    Remarks
The Idempotent Producer may return invalid timestamp (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE), and an offset (RD_KAFKA_OFFSET_INVALID) for retried messages that were previously successfully delivered but not properly acknowledged.
    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $dr_msg_cb void(*)(rd_kafka_t*, const rd_kafka_message_t*, void*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ac1c9946aee26e10de2661fcf2242ea8a + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac1c9946aee26e10de2661fcf2242ea8a */ public static function rd_kafka_conf_set_dr_msg_cb(?\FFI\CData $conf, $dr_msg_cb): void { @@ -629,9 +715,12 @@ public static function rd_kafka_conf_set_dr_msg_cb(?\FFI\CData $conf, $dr_msg_cb } /** + *

    Consumer: Set consume callback for use with rd_kafka_consumer_poll()

    + *

    The consume_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $consume_cb void(*)(rd_kafka_message_t*, void*) * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a65eaf45e9b26bcb085f4651634ce06a5 */ public static function rd_kafka_conf_set_consume_cb(?\FFI\CData $conf, $consume_cb): void { @@ -643,39 +732,67 @@ public static function rd_kafka_conf_set_consume_cb(?\FFI\CData $conf, $consume_ *

    The err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions' contains the full partition set that was either assigned or revoked.

    *

    Registering a rebalance_cb turns off librdkafka's automatic partition assignment/revocation and instead delegates that responsibility to the application's rebalance_cb.

    *

    The rebalance callback is responsible for updating librdkafka's assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle arbitrary rebalancing failures where err is neither of those.

    Remarks
    In this latter case (arbitrary error), the application must call rd_kafka_assign(rk, NULL) to synchronize state.
    - *

    Without a rebalance callback this is done automatically by librdkafka but registering a rebalance callback gives the application flexibility in performing other operations along with the assinging/revocation, such as fetching offsets from an alternate location (on assign) or manually committing offsets (on revoke).

    - *
    Remarks
    The partitions list is destroyed by librdkafka on return return from the rebalance_cb and must not be freed or saved by the application.
    - *

    The following example shows the application's responsibilities:

    1 static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
    - *
    2  rd_kafka_topic_partition_list_t *partitions,
    - *
    3  void *opaque) {
    - *
    4 
    - *
    5  switch (err)
    - *
    6  {
    - *
    7  case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
    - *
    8  // application may load offets from arbitrary external
    - *
    9  // storage here and update \p partitions
    - *
    10 
    - *
    11  rd_kafka_assign(rk, partitions);
    - *
    12  break;
    - *
    13 
    - *
    14  case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
    - *
    15  if (manual_commits) // Optional explicit manual commit
    - *
    16  rd_kafka_commit(rk, partitions, 0); // sync commit
    - *
    17 
    - *
    18  rd_kafka_assign(rk, NULL);
    - *
    19  break;
    - *
    20 
    - *
    21  default:
    - *
    22  handle_unlikely_error(err);
    - *
    23  rd_kafka_assign(rk, NULL); // sync state
    - *
    24  break;
    - *
    25  }
    - *
    26 }
    - *
    + *

    For eager/non-cooperative partition.assignment.strategy assignors, such as range and roundrobin, the application must use rd_kafka_assign() to set or clear the entire assignment. For the cooperative assignors, such as cooperative-sticky, the application must use rd_kafka_incremental_assign() for RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign() for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS.

    + *

    Without a rebalance callback this is done automatically by librdkafka but registering a rebalance callback gives the application flexibility in performing other operations along with the assigning/revocation, such as fetching offsets from an alternate location (on assign) or manually committing offsets (on revoke).

    + *

    rebalance_cb is always triggered exactly once when a rebalance completes with a new assignment, even if that assignment is empty. If an eager/non-cooperative assignor is configured, there will eventually be exactly one corresponding call to rebalance_cb to revoke these partitions (even if empty), whether this is due to a group rebalance or lost partitions. In the cooperative case, rebalance_cb will never be called if the set of partitions being revoked is empty (whether or not lost).

    + *

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    + *
    Remarks
The partitions list is destroyed by librdkafka on return from the rebalance_cb and must not be freed or saved by the application.
    + *
    + * Be careful when modifying the partitions list. Changing this list should only be done to change the initial offsets for each partition. But a function like rd_kafka_position() might have unexpected effects for instance when a consumer gets assigned a partition it used to consume at an earlier rebalance. In this case, the list of partitions will be updated with the old offset for that partition. In this case, it is generally better to pass a copy of the list (see rd_kafka_topic_partition_list_copy()). The result of rd_kafka_position() is typically outdated in RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS.
    + *
    See also
    rd_kafka_assign()
    + *
    + * rd_kafka_incremental_assign()
    + *
    + * rd_kafka_incremental_unassign()
    + *
    + * rd_kafka_assignment_lost()
    + *
    + * rd_kafka_rebalance_protocol()
    + *

    The following example shows the application's responsibilities:

    static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
    + *
    rd_kafka_topic_partition_list_t *partitions,
    + *
    void *opaque) {
    + *
    + *
    switch (err)
    + *
    {
    + *
    case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
    + *
// application may load offsets from arbitrary external
    + *
    // storage here and update \p partitions
    + *
    if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
    + *
    rd_kafka_incremental_assign(rk, partitions);
    + *
    else // EAGER
    + *
    rd_kafka_assign(rk, partitions);
    + *
    break;
    + *
    + *
    case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
    + *
    if (manual_commits) // Optional explicit manual commit
    + *
    rd_kafka_commit(rk, partitions, 0); // sync commit
    + *
    + *
    if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
    + *
    rd_kafka_incremental_unassign(rk, partitions);
    + *
    else // EAGER
    + *
    rd_kafka_assign(rk, NULL);
    + *
    break;
    + *
    + *
    default:
    + *
    handle_unlikely_error(err);
    + *
    rd_kafka_assign(rk, NULL); // sync state
    + *
    break;
    + *
    }
    + *
    }
    + *
    rd_kafka_resp_err_t
    rd_kafka_resp_err_t
    Error codes.
    Definition: rdkafka.h:278
    + *
    RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
    @ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
    Definition: rdkafka.h:334
    + *
    RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
    @ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
    Definition: rdkafka.h:336
    + *
    rd_kafka_assign
    RD_EXPORT rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions)
    Atomic assignment of partitions to consume.
    + *
    rd_kafka_incremental_unassign
    RD_EXPORT rd_kafka_error_t * rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions)
    Incrementally remove partitions from the current assignment.
    + *
    rd_kafka_incremental_assign
    RD_EXPORT rd_kafka_error_t * rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions)
    Incrementally add partitions to the current assignment.
    + *
    rd_kafka_rebalance_protocol
    RD_EXPORT const char * rd_kafka_rebalance_protocol(rd_kafka_t *rk)
    The rebalance protocol currently in use. This will be "NONE" if the consumer has not (yet) joined a g...
    + *
    rd_kafka_commit
    RD_EXPORT rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async)
    Commit offsets on broker for the provided list of partitions.
    + *
    rd_kafka_topic_partition_list_t
    A growable list of Topic+Partitions.
    Definition: rdkafka.h:917
    + *
    Remarks
    The above example lacks error handling for assign calls, see the examples/ directory.
    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $rebalance_cb void(*)(rd_kafka_t*, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a10db731dc1a295bd9884e4f8cb199311 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a10db731dc1a295bd9884e4f8cb199311 */ public static function rd_kafka_conf_set_rebalance_cb(?\FFI\CData $conf, $rebalance_cb): void { @@ -688,12 +805,13 @@ public static function rd_kafka_conf_set_rebalance_cb(?\FFI\CData $conf, $rebala *

    If no partitions had valid offsets to commit this callback will be called with err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered an error.

    *

    The offsets list contains per-partition information:

    + *

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $offset_commit_cb void(*)(rd_kafka_t*, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a1ab8bb9e8d8cdd5906f9e060b506f2eb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1ab8bb9e8d8cdd5906f9e060b506f2eb */ public static function rd_kafka_conf_set_offset_commit_cb(?\FFI\CData $conf, $offset_commit_cb): void { @@ -702,12 +820,15 @@ public static function rd_kafka_conf_set_offset_commit_cb(?\FFI\CData $conf, $of /** *

    Set error callback in provided conf object.

    - *

    The error callback is used by librdkafka to signal critical errors back to the application.

    - *

    If no error_cb is registered then the errors will be logged instead.

    + *

    The error callback is used by librdkafka to signal warnings and errors back to the application.

    + *

    These errors should generally be considered informational and non-permanent, the client will try to recover automatically from all type of errors. Given that the client and cluster configuration is correct the application should treat these as temporary errors.

    + *

    error_cb will be triggered with err set to RD_KAFKA_RESP_ERR__FATAL if a fatal error has been raised; in this case use rd_kafka_fatal_error() to retrieve the fatal error code and error string, and then begin terminating the client instance.

    + *

    If no error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set with rd_kafka_conf_set_events, then the errors will be logged instead.

    + *

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $error_cb void(*)(rd_kafka_t*, int, const char*, void*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ace721ef3b7c22d0c111ec747ef039a90 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ace721ef3b7c22d0c111ec747ef039a90 */ public static function rd_kafka_conf_set_error_cb(?\FFI\CData $conf, $error_cb): void { @@ -719,11 +840,12 @@ public static function rd_kafka_conf_set_error_cb(?\FFI\CData $conf, $error_cb): *

    The throttle callback is used to forward broker throttle times to the application for Produce and Fetch (consume) requests.

    *

    Callbacks are triggered whenever a non-zero throttle time is returned by the broker, or when the throttle time drops back to zero.

    *

    An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at regular intervals to serve queued callbacks.

    + *

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    *
    Remarks
    Requires broker version 0.9.0 or later.
    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $throttle_cb void(*)(rd_kafka_t*, const char*, int32_t, int, void*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a04160826ad039d42c10edec456163fa7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a04160826ad039d42c10edec456163fa7 */ public static function rd_kafka_conf_set_throttle_cb(?\FFI\CData $conf, $throttle_cb): void { @@ -738,7 +860,7 @@ public static function rd_kafka_conf_set_throttle_cb(?\FFI\CData $conf, $throttl * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $log_cb void(*)(const rd_kafka_t*, int, const char*, const char*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a06ade2ca41f32eb82c6f7e3d4acbe19f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a06ade2ca41f32eb82c6f7e3d4acbe19f */ public static function rd_kafka_conf_set_log_cb(?\FFI\CData $conf, $log_cb): void { @@ -751,13 +873,15 @@ public static function rd_kafka_conf_set_log_cb(?\FFI\CData $conf, $log_cb): voi *
  • rk - Kafka handle
  • *
  • json - String containing the statistics data in JSON format
  • *
  • json_len - Length of json string.
  • - *
  • opaque - Application-provided opaque.
  • + *
  • opaque - Application-provided opaque as set by rd_kafka_conf_set_opaque().
  • * - *

    If the application wishes to hold on to the json pointer and free it at a later time it must return 1 from the stats_cb. If the application returns 0 from the stats_cb then librdkafka will immediately free the json pointer.

    + *

    For more information on the format of json, see https://github.com/edenhill/librdkafka/wiki/Statistics

    + *

    If the application wishes to hold on to the json pointer and free it at a later time it must return 1 from the stats_cb. If the application returns 0 from the stats_cb then librdkafka will immediately free the json pointer.

    + *

    See STATISTICS.md for a full definition of the JSON object.

    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $stats_cb int(*)(rd_kafka_t*, char*, size_t, void*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a597d00432e3ca22174d18e7e348fb766 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a597d00432e3ca22174d18e7e348fb766 */ public static function rd_kafka_conf_set_stats_cb(?\FFI\CData $conf, $stats_cb): void { @@ -767,6 +891,7 @@ public static function rd_kafka_conf_set_stats_cb(?\FFI\CData $conf, $stats_cb): /** *

    Set socket callback.

    *

    The socket callback is responsible for opening a socket according to the supplied domain, type and protocol. The socket shall be created with CLOEXEC set in a racefree fashion, if possible.

    + *

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    *

    Default:

    + *
    Remarks
    If RD_KAFKA_MSG_F_PARTITION is set in msgflags, the .partition field of the rkmessages is used instead of partition.
    + * + *
    Remarks
    This interface does NOT support setting message headers on the provided rkmessages.
    * @param \FFI\CData|null $rkt rd_kafka_topic_t* * @param int|null $partition int32_t * @param int|null $msgflags int @@ -2032,7 +2286,7 @@ public static function rd_kafka_producev(?\FFI\CData $rk, ...$args): int * @param int|null $message_cnt int * @return int|null int - the number of messages succesfully enqueued for producing. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a7ad15c71f228c47946500a0e5c6f88ed + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7ad15c71f228c47946500a0e5c6f88ed */ public static function rd_kafka_produce_batch(?\FFI\CData $rkt, ?int $partition, ?int $msgflags, ?\FFI\CData $rkmessages, ?int $message_cnt): ?int { @@ -2041,12 +2295,18 @@ public static function rd_kafka_produce_batch(?\FFI\CData $rkt, ?int $partition, /** *

    Wait until all outstanding produce requests, et.al, are completed. This should typically be done prior to destroying a producer instance to make sure all queued and in-flight produce requests are completed before terminating.

    - *
    Remarks
    This function will call rd_kafka_poll() and thus trigger callbacks.
    + *
    Remarks
    This function will call rd_kafka_poll() and thus trigger callbacks.
    + *
    + * The linger.ms time will be ignored for the duration of the call, queued messages will be sent to the broker as soon as possible.
    + *
    + * If RD_KAFKA_EVENT_DR has been enabled (through rd_kafka_conf_set_events()) this function will not call rd_kafka_poll() but instead wait for the librdkafka-handled message count to reach zero. This requires the application to serve the event queue in a separate thread. In this mode only messages are counted, not other types of queued events.
    + * + *
    See also
    rd_kafka_outq_len()
    * @param \FFI\CData|null $rk rd_kafka_t* * @param int|null $timeout_ms int * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__TIMED_OUT if timeout_ms was reached before all outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aaff06c4372bce917c17f3c1a5d8b205d + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aaff06c4372bce917c17f3c1a5d8b205d */ public static function rd_kafka_flush(?\FFI\CData $rk, ?int $timeout_ms): int { @@ -2054,10 +2314,17 @@ public static function rd_kafka_flush(?\FFI\CData $rk, ?int $timeout_ms): int } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int|null $purge_flags int - * @return int rd_kafka_resp_err_t + *

    Purge messages currently handled by the producer instance.

    + * + *

    The application will need to call rd_kafka_poll() or rd_kafka_flush() afterwards to serve the delivery report callbacks of the purged messages.

    + *

    Messages purged from internal queues fail with the delivery report error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that are in-flight to or from the broker will fail with the error code set to RD_KAFKA_RESP_ERR__PURGE_INFLIGHT.

    + *
    Warning
    Purging messages that are in-flight to or from the broker will ignore any sub-sequent acknowledgement for these messages received from the broker, effectively making it impossible for the application to know if the messages were successfully produced or not. This may result in duplicate messages if the application retries these messages at a later time.
    + *
    Remarks
    This call may block for a short time while background thread queues are purged.
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param int|null $purge_flags int - Tells which messages to purge and how. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, RD_KAFKA_RESP_ERR__INVALID_ARG if the purge flags are invalid or unknown, RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer client instance. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a59c28026ebf7a67df5954879ebab4707 */ public static function rd_kafka_purge(?\FFI\CData $rk, ?int $purge_flags): int { @@ -2072,15 +2339,15 @@ public static function rd_kafka_purge(?\FFI\CData $rk, ?int $purge_flags): int *
  • metadatap pointer to hold metadata result. The *metadatap pointer must be released with rd_kafka_metadata_destroy().
  • *
  • timeout_ms maximum response time before failing.
  • * - *

    Returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap) will be set, else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or other error code on error.

    + *
    Remarks
    Consumer: If all_topics is non-zero the Metadata response information may trigger a re-join if any subscribed topics have changed partition count or existence state.
    * @param \FFI\CData|null $rk rd_kafka_t* * @param int|null $all_topics int * @param \FFI\CData|null $only_rkt rd_kafka_topic_t* * @param \FFI\CData|null $metadatap const struct rd_kafka_metadata** * @param int|null $timeout_ms int - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap) will be set, else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or other error code on error. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a84bba4a4b13fdb515f1a22d6fd4f7344 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a84bba4a4b13fdb515f1a22d6fd4f7344 */ public static function rd_kafka_metadata(?\FFI\CData $rk, ?int $all_topics, ?\FFI\CData $only_rkt, ?\FFI\CData $metadatap, ?int $timeout_ms): int { @@ -2101,14 +2368,15 @@ public static function rd_kafka_metadata_destroy($metadata): void *

    group is an optional group name to describe, otherwise (NULL) all groups are returned.

    *

    timeout_ms is the (approximate) maximum time to wait for response from brokers and must be a positive value.

    * + *

    The grplistp remains untouched if any error code is returned, with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves as RD_KAFKA_RESP_ERR__NO_ERROR (success) but with an incomplete group list.

    *
    See also
    Use rd_kafka_group_list_destroy() to release list memory.
    * @param \FFI\CData|null $rk rd_kafka_t* * @param string|null $group const char* * @param \FFI\CData|null $grplistp const struct rd_kafka_group_list** * @param int|null $timeout_ms int - * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__NO_ERROR on success and grplistp is updated to point to a newly allocated list of groups. Else returns an error code on failure and grplistp remains untouched. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__NO_ERROR on success and grplistp is updated to point to a newly allocated list of groups. RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded in time but at least one group is returned in grplistlp. RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the given timeframe but not all brokers have yet responded, or if the list of brokers in the cluster could not be obtained within the given timeframe. RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found. Other error codes may also be returned from the request layer. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a6cfc79819453ecd4aa94fbae6dbbea0a + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6cfc79819453ecd4aa94fbae6dbbea0a */ public static function rd_kafka_list_groups(?\FFI\CData $rk, ?string $group, ?\FFI\CData $grplistp, ?int $timeout_ms): int { @@ -2131,12 +2399,13 @@ public static function rd_kafka_group_list_destroy(?\FFI\CData $grplist): void *

    brokerlist is a ,-separated list of brokers in the format: <broker1>,<broker2>,.. Where each broker is in either the host or URL based format: <host>[:<port>] <proto>://<host>[:port] <proto> is either PLAINTEXT, SSL, SASL, SASL_PLAINTEXT The two formats can be mixed but ultimately the value of the security.protocol config property decides what brokers are allowed.

    *

    Example: brokerlist = "broker1:10000,broker2" brokerlist = "SSL://broker3:9000,ssl://broker2"

    * - *
    Remarks
    Brokers may also be defined with the metadata.broker.list or bootstrap.servers configuration property (preferred method).
    + *
    Remarks
    Brokers may also be defined with the metadata.broker.list or bootstrap.servers configuration property (preferred method).
    + *
    Deprecated:
    Set bootstrap servers with the bootstrap.servers configuration property.
    * @param \FFI\CData|null $rk rd_kafka_t* * @param string|null $brokerlist const char* * @return int|null int - the number of brokers successfully added. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ab83da8da989fe41693d78d982c7ae6b7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab83da8da989fe41693d78d982c7ae6b7 */ public static function rd_kafka_brokers_add(?\FFI\CData $rk, ?string $brokerlist): ?int { @@ -2151,7 +2420,7 @@ public static function rd_kafka_brokers_add(?\FFI\CData $rk, ?string $brokerlist * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|\Closure $func void(*)(const rd_kafka_t*, int, const char*, const char*) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9e4af9adee414af74c7817403f7c4a53 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9e4af9adee414af74c7817403f7c4a53 */ public static function rd_kafka_set_logger(?\FFI\CData $rk, $func): void { @@ -2159,12 +2428,13 @@ public static function rd_kafka_set_logger(?\FFI\CData $rk, $func): void } /** - *

    Specifies the maximum logging level produced by internal kafka logging and debugging.

    - *

    If the "debug" configuration property is set the level is automatically adjusted to LOG_DEBUG (7).

    + *

    Specifies the maximum logging level emitted by internal kafka logging and debugging.

    + *
    Deprecated:
    Set the "log_level" configuration property instead.
    + *
    Remarks
    If the "debug" configuration property is set the log level is automatically adjusted to LOG_DEBUG (7).
    * @param \FFI\CData|null $rk rd_kafka_t* * @param int|null $level int * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#acadeefced6bb60acd27e7a0dad553aa4 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acadeefced6bb60acd27e7a0dad553aa4 */ public static function rd_kafka_set_log_level(?\FFI\CData $rk, ?int $level): void { @@ -2184,11 +2454,14 @@ public static function rd_kafka_log_print(?\FFI\CData $rk, ?int $level, ?string } /** + *

    Builtin log sink: print to syslog.

    + *
    Remarks
    This logger is only available if librdkafka was built with syslog support.
    * @param \FFI\CData|null $rk const rd_kafka_t* * @param int|null $level int * @param string|null $fac const char* * @param string|null $buf const char* * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a748d5eaca75f94bb4ff4217ae824385d */ public static function rd_kafka_log_syslog(?\FFI\CData $rk, ?int $level, ?string $fac, ?string $buf): void { @@ -2197,12 +2470,19 @@ public static function rd_kafka_log_syslog(?\FFI\CData $rk, ?int $level, ?string /** *

    Returns the current out queue length.

    - *

    The out queue contains messages waiting to be sent to, or acknowledged by, the broker.

    - *

    An application should wait for this queue to reach zero before terminating to make sure outstanding requests (such as offset commits) are fully processed.

    + *

    The out queue length is the sum of:

    + *

    An application should wait for the return value of this function to reach zero before terminating to make sure outstanding messages, requests (such as offset commits), callbacks and events are fully processed. See rd_kafka_flush().

    + * + *
    See also
    rd_kafka_flush()
    * @param \FFI\CData|null $rk rd_kafka_t* - ) - * @return int|null int - number of messages in the out queue. + * @return int|null int - number of messages and events waiting in queues. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ad4b3b7659cf9a79d3353810d6b625bb7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad4b3b7659cf9a79d3353810d6b625bb7 */ public static function rd_kafka_outq_len(?\FFI\CData $rk): ?int { @@ -2215,7 +2495,7 @@ public static function rd_kafka_outq_len(?\FFI\CData $rk): ?int * @param \FFI\CData|null $fp FILE* * @param \FFI\CData|null $rk rd_kafka_t* * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a27a46f74ec4ccc9c0b36dbcf546908a1 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a27a46f74ec4ccc9c0b36dbcf546908a1 */ public static function rd_kafka_dump(?\FFI\CData $fp, ?\FFI\CData $rk): void { @@ -2227,7 +2507,7 @@ public static function rd_kafka_dump(?\FFI\CData $fp, ?\FFI\CData $rk): void *

    Used by regression tests.

    * @return int|null int - ) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a0901699375c972b807ba5255773f017f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a0901699375c972b807ba5255773f017f */ public static function rd_kafka_thread_cnt(): ?int { @@ -2236,11 +2516,12 @@ public static function rd_kafka_thread_cnt(): ?int /** *

    Wait for all rd_kafka_t objects to be destroyed.

    - *

    Returns 0 if all kafka objects are now destroyed, or -1 if the timeout was reached. Since rd_kafka_destroy() is an asynch operation the rd_kafka_wait_destroyed() function can be used for applications where a clean shutdown is required.

    + *

    Returns 0 if all kafka objects are now destroyed, or -1 if the timeout was reached.

    + *
    Remarks
    This function is deprecated.
    * @param int|null $timeout_ms int - ) * @return int|null int * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa90f2c92a382dbd0a090d40caa73356d + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa90f2c92a382dbd0a090d40caa73356d */ public static function rd_kafka_wait_destroyed(?int $timeout_ms): ?int { @@ -2248,8 +2529,10 @@ public static function rd_kafka_wait_destroyed(?int $timeout_ms): ?int } /** - * @return int|null int + *

    Run librdkafka's built-in unit-tests.

    + * @return int|null int - ) - the number of failures, or 0 if all tests passed. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab932d463be030a5e83ebfc5186ff20b8 */ public static function rd_kafka_unittest(): ?int { @@ -2262,7 +2545,7 @@ public static function rd_kafka_unittest(): ?int * @param \FFI\CData|null $rk rd_kafka_t* - ) * @return int rd_kafka_resp_err_t * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9bfa0a1dd3f866cbf0c82fc089bd7904 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9bfa0a1dd3f866cbf0c82fc089bd7904 */ public static function rd_kafka_poll_set_consumer(?\FFI\CData $rk): int { @@ -2274,7 +2557,7 @@ public static function rd_kafka_poll_set_consumer(?\FFI\CData $rk): int * @param \FFI\CData|null $rkev const rd_kafka_event_t* - ) * @return int|null rd_kafka_event_type_t - the event type for the given event. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a108de3729a4aa609a72a458a9de02d1d + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a108de3729a4aa609a72a458a9de02d1d */ public static function rd_kafka_event_type(?\FFI\CData $rkev): ?int { @@ -2286,7 +2569,7 @@ public static function rd_kafka_event_type(?\FFI\CData $rkev): ?int * @param \FFI\CData|null $rkev const rd_kafka_event_t* - ) * @return string|null const char* - the event type's name for the given event. 
* @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a608193d1fb486f78c79497c8c5b63866 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a608193d1fb486f78c79497c8c5b63866 */ public static function rd_kafka_event_name(?\FFI\CData $rkev): ?string { @@ -2300,7 +2583,7 @@ public static function rd_kafka_event_name(?\FFI\CData $rkev): ?string * As a convenience it is okay to pass rkev as NULL in which case no action is performed. * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#af1835c85aa202caf629861f29f475099 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af1835c85aa202caf629861f29f475099 */ public static function rd_kafka_event_destroy(?\FFI\CData $rkev): void { @@ -2313,11 +2596,13 @@ public static function rd_kafka_event_destroy(?\FFI\CData $rkev): void *
  • RD_KAFKA_EVENT_FETCH (1 message)
  • *
  • RD_KAFKA_EVENT_DR (>=1 message(s))
  • * - *
    Remarks
    The returned message(s) MUST NOT be freed with rd_kafka_message_destroy().
    + *
    Remarks
    The returned message(s) MUST NOT be freed with rd_kafka_message_destroy().
    + *
    + * on_consume() interceptor may be called from this function prior to passing message to application.
    * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) * @return \FFI\CData|null const rd_kafka_message_t* - the next message from an event. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a3a855eb7bdf17f5797d4911362a5fc7c + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3a855eb7bdf17f5797d4911362a5fc7c */ public static function rd_kafka_event_message_next(?\FFI\CData $rkev): ?\FFI\CData { @@ -2330,12 +2615,14 @@ public static function rd_kafka_event_message_next(?\FFI\CData $rkev): ?\FFI\CDa *
  • RD_KAFKA_EVENT_FETCH (1 message)
  • *
  • RD_KAFKA_EVENT_DR (>=1 message(s))
  • * + * + *
    Remarks
    on_consume() interceptor may be called from this function prior to passing message to application.
    * @param \FFI\CData|null $rkev rd_kafka_event_t* * @param \FFI\CData|null $rkmessages const rd_kafka_message_t** * @param int|null $size size_t * @return int|null size_t - the number of messages extracted. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a18a17000ebe58eabcdafab37924442b8 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a18a17000ebe58eabcdafab37924442b8 */ public static function rd_kafka_event_message_array(?\FFI\CData $rkev, ?\FFI\CData $rkmessages, ?int $size): ?int { @@ -2350,7 +2637,7 @@ public static function rd_kafka_event_message_array(?\FFI\CData $rkev, ?\FFI\CDa * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) * @return int|null size_t - the number of remaining messages in the event. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a61d9d106c8956f379bb77d393b8acf90 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a61d9d106c8956f379bb77d393b8acf90 */ public static function rd_kafka_event_message_count(?\FFI\CData $rkev): ?int { @@ -2358,13 +2645,14 @@ public static function rd_kafka_event_message_count(?\FFI\CData $rkev): ?int } /** + *

    Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error.

    *

    Event types:

    * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) * @return int rd_kafka_resp_err_t - the error code for the event. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa28b7d6bb4885843f9a8b9bafa0e15a5 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa28b7d6bb4885843f9a8b9bafa0e15a5 */ public static function rd_kafka_event_error(?\FFI\CData $rkev): int { @@ -2378,7 +2666,7 @@ public static function rd_kafka_event_error(?\FFI\CData $rkev): int * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) * @return string|null const char* - the error string (if any). An application should check that rd_kafka_event_error() returns non-zero before calling this function. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ad76a1b2d6c4f1727725b075678b88793 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad76a1b2d6c4f1727725b075678b88793 */ public static function rd_kafka_event_error_string(?\FFI\CData $rkev): ?string { @@ -2386,9 +2674,14 @@ public static function rd_kafka_event_error_string(?\FFI\CData $rkev): ?string } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return int|null int + *

    Event types:

    + *
    See also
    rd_kafka_fatal_error()
    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return int|null int - 1 if the error is a fatal error, else 0. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1fe9d1e960a59774dbd5a0336927de7d */ public static function rd_kafka_event_error_is_fatal(?\FFI\CData $rkev): ?int { @@ -2397,12 +2690,23 @@ public static function rd_kafka_event_error_is_fatal(?\FFI\CData $rkev): ?int /** *

    Event types:

    * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) - * @return \FFI\CData|object|string|null void* - the user opaque (if any) + * @return \FFI\CData|object|string|null void* - the event opaque (if any) as passed to rd_kafka_commit() (et.al) or rd_kafka_AdminOptions_set_opaque(), depending on event type. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a8650ed2a19108d490a65c9aff3e66525 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8650ed2a19108d490a65c9aff3e66525 */ public static function rd_kafka_event_opaque(?\FFI\CData $rkev) { @@ -2420,7 +2724,7 @@ public static function rd_kafka_event_opaque(?\FFI\CData $rkev) * @param \FFI\CData|null $level int* * @return int|null int - 0 on success or -1 if unsupported event type. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a535efaa16772642d724bedca414c17c7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a535efaa16772642d724bedca414c17c7 */ public static function rd_kafka_event_log(?\FFI\CData $rkev, ?\FFI\CData $fac, ?\FFI\CData $str, ?\FFI\CData $level): ?int { @@ -2428,9 +2732,16 @@ public static function rd_kafka_event_log(?\FFI\CData $rkev, ?\FFI\CData $fac, ? } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return string|null const char* + *

    Extract stats from the event.

    + *

    Event types:

    + * + *
    Remarks
    the returned string will be freed automatically along with the event object
    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return string|null const char* - stats json string. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae8572e38ffb452f96d13a2d046fb71d9 */ public static function rd_kafka_event_stats(?\FFI\CData $rkev): ?string { @@ -2446,7 +2757,7 @@ public static function rd_kafka_event_stats(?\FFI\CData $rkev): ?string * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) * @return \FFI\CData|null rd_kafka_topic_partition_list_t* - the topic partition list from the event. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#abc8f98c9b35be497251fb8515e9e6633 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abc8f98c9b35be497251fb8515e9e6633 */ public static function rd_kafka_event_topic_partition_list(?\FFI\CData $rkev): ?\FFI\CData { @@ -2459,7 +2770,7 @@ public static function rd_kafka_event_topic_partition_list(?\FFI\CData $rkev): ? * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) * @return \FFI\CData|null rd_kafka_topic_partition_t* - a newly allocated topic_partition container, if applicable for the event type, else NULL. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#abf4cce46d6e566dd35865c0451b76afe + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abf4cce46d6e566dd35865c0451b76afe */ public static function rd_kafka_event_topic_partition(?\FFI\CData $rkev): ?\FFI\CData { @@ -2467,9 +2778,13 @@ public static function rd_kafka_event_topic_partition(?\FFI\CData $rkev): ?\FFI\ } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_CreateTopics_result_t* + *

    Get CreateTopics result.

    + * + *

    Event types: RD_KAFKA_EVENT_CREATETOPICS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_CreateTopics_result_t* - the result of a CreateTopics request, or NULL if event is of different type. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a0aa821ae098ca3b88c1a7fdec25c645e */ public static function rd_kafka_event_CreateTopics_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -2477,9 +2792,13 @@ public static function rd_kafka_event_CreateTopics_result(?\FFI\CData $rkev): ?\ } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_DeleteTopics_result_t* + *

    Get DeleteTopics result.

    + * + *

    Event types: RD_KAFKA_EVENT_DELETETOPICS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_DeleteTopics_result_t* - the result of a DeleteTopics request, or NULL if event is of different type. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a4cd8e837f75589842581110890740f65 */ public static function rd_kafka_event_DeleteTopics_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -2487,9 +2806,13 @@ public static function rd_kafka_event_DeleteTopics_result(?\FFI\CData $rkev): ?\ } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_CreatePartitions_result_t* + *

    Get CreatePartitions result.

    + * + *

    Event types: RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_CreatePartitions_result_t* - the result of a CreatePartitions request, or NULL if event is of different type. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab4f667a48124ac6be89a2b9bcd5f8d47 */ public static function rd_kafka_event_CreatePartitions_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -2497,9 +2820,13 @@ public static function rd_kafka_event_CreatePartitions_result(?\FFI\CData $rkev) } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_AlterConfigs_result_t* + *

    Get AlterConfigs result.

    + * + *

    Event types: RD_KAFKA_EVENT_ALTERCONFIGS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_AlterConfigs_result_t* - the result of a AlterConfigs request, or NULL if event is of different type. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a97d0666a40b2bd014d564de332eacf3f */ public static function rd_kafka_event_AlterConfigs_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -2507,9 +2834,13 @@ public static function rd_kafka_event_AlterConfigs_result(?\FFI\CData $rkev): ?\ } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_DescribeConfigs_result_t* + *

    Get DescribeConfigs result.

    + * + *

    Event types: RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_DescribeConfigs_result_t* - the result of a DescribeConfigs request, or NULL if event is of different type. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a0e1a04881f39aa850cd47b43583f5a90 */ public static function rd_kafka_event_DescribeConfigs_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -2519,12 +2850,13 @@ public static function rd_kafka_event_DescribeConfigs_result(?\FFI\CData $rkev): /** *

    Poll a queue for an event for max timeout_ms.

    * - *
    Remarks
    Use rd_kafka_event_destroy() to free the event.
    + *
    Remarks
    Use rd_kafka_event_destroy() to free the event.
    + *
    See also
    rd_kafka_conf_set_background_event_cb()
    * @param \FFI\CData|null $rkqu rd_kafka_queue_t* * @param int|null $timeout_ms int * @return \FFI\CData|null rd_kafka_event_t* - an event, or NULL. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a2f147ed1c554c9048893fb1adde86dfa + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a2f147ed1c554c9048893fb1adde86dfa */ public static function rd_kafka_queue_poll(?\FFI\CData $rkqu, ?int $timeout_ms): ?\FFI\CData { @@ -2534,12 +2866,15 @@ public static function rd_kafka_queue_poll(?\FFI\CData $rkqu, ?int $timeout_ms): /** *

    Poll a queue for events served through callbacks for max timeout_ms.

    * - *
    Remarks
    This API must only be used for queues with callbacks registered for all expected event types. E.g., not a message queue.
    + *
    Remarks
    This API must only be used for queues with callbacks registered for all expected event types. E.g., not a message queue.
    + *
    + * Also see rd_kafka_conf_set_background_event_cb() for triggering event callbacks from a librdkafka-managed background thread.
    + *
    See also
    rd_kafka_conf_set_background_event_cb()
    * @param \FFI\CData|null $rkqu rd_kafka_queue_t* * @param int|null $timeout_ms int * @return int|null int - the number of events served. * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a13d80084f20a2800e863b97e465ce98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a13d80084f20a2800e863b97e465ce98e */ public static function rd_kafka_queue_poll_callback(?\FFI\CData $rkqu, ?int $timeout_ms): ?int { @@ -2547,12 +2882,17 @@ public static function rd_kafka_queue_poll_callback(?\FFI\CData $rkqu, ?int $tim } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param \FFI\CData|object|string|null $plug_opaquep void** - * @param \FFI\CData|null $errstr char* + *

    Plugin's configuration initializer method called each time the library is referenced from configuration (even if previously loaded by another client instance).

    + *
    Remarks
    This method MUST be implemented by plugins and have the symbol name conf_init
    + * + *
    Remarks
    A plugin may add an on_conf_destroy() interceptor to clean up plugin-specific resources created in the plugin's conf_init() method.
    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration set up to this point. + * @param \FFI\CData|object|string|null $plug_opaquep void** - Plugin can set this pointer to a per-configuration opaque pointer. + * @param \FFI\CData|null $errstr char* - String buffer of size errstr_size where plugin must write a human readable error string in the case the initializer fails (returns non-zero). - Maximum space (including \0) in errstr. * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6d98d642466324e3b188accb2623387f */ public static function rd_kafka_plugin_f_conf_init_t(?\FFI\CData $conf, $plug_opaquep, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -2560,14 +2900,16 @@ public static function rd_kafka_plugin_f_conf_init_t(?\FFI\CData $conf, $plug_op } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param string|null $name const char* - * @param string|null $val const char* - * @param \FFI\CData|null $errstr char* - * @param int|null $errstr_size size_t - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_conf_res_t + *

    on_conf_set() is called from rd_kafka_*_conf_set() in the order the interceptors were added.

    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration object. + * @param string|null $name const char* - The configuration property to set. + * @param string|null $val const char* - The configuration value to set, or NULL for reverting to default in which case the previous value should be freed. + * @param \FFI\CData|null $errstr char* - A human readable error string in case the interceptor fails. + * @param int|null $errstr_size size_t - Maximum space (including \0) in errstr. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if the property was known and successfully handled by the interceptor, RD_KAFKA_CONF_INVALID if the property was handled by the interceptor but the value was invalid, or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle this property, in which case the property is passed on on the interceptor in the chain, finally ending up at the built-in configuration handler. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6c1f90ab0ce78b28daa9ee27a682da10 */ public static function rd_kafka_interceptor_f_on_conf_set_t(?\FFI\CData $conf, ?string $name, ?string $val, ?\FFI\CData $errstr, ?int $errstr_size, $ic_opaque): int { @@ -2575,13 +2917,18 @@ public static function rd_kafka_interceptor_f_on_conf_set_t(?\FFI\CData $conf, ? } /** - * @param \FFI\CData|null $new_conf rd_kafka_conf_t* - * @param \FFI\CData|null $old_conf const rd_kafka_conf_t* - * @param int|null $filter_cnt size_t + *

    on_conf_dup() is called from rd_kafka_conf_dup() in the order the interceptors were added and is used to let an interceptor re-register its conf interceptors with a new opaque value. The on_conf_dup() method is called prior to the configuration from old_conf being copied to new_conf.

    + * + * + *
    Remarks
    No on_conf_* interceptors are copied to the new configuration object on rd_kafka_conf_dup().
    + * @param \FFI\CData|null $new_conf rd_kafka_conf_t* - New configuration object. + * @param \FFI\CData|null $old_conf const rd_kafka_conf_t* - Old configuration object to copy properties from. + * @param int|null $filter_cnt size_t - Number of property names to filter in filter. - Property names to filter out (ignore) when setting up new_conf. * @param \FFI\CData|null $filter const char** - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure (which is logged but otherwise ignored). * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a166d8f2aea67064516b7995869af2411 */ public static function rd_kafka_interceptor_f_on_conf_dup_t(?\FFI\CData $new_conf, ?\FFI\CData $old_conf, ?int $filter_cnt, ?\FFI\CData $filter, $ic_opaque): int { @@ -2589,9 +2936,11 @@ public static function rd_kafka_interceptor_f_on_conf_dup_t(?\FFI\CData $new_con } /** - * @param \FFI\CData|object|string|null $ic_opaque void* + *

    on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the order the interceptors were added.

    + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). * @return int rd_kafka_resp_err_t * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afd1b354fab7374b77cf5e43c8a04c479 */ public static function rd_kafka_interceptor_f_on_conf_destroy_t($ic_opaque): int { @@ -2599,13 +2948,18 @@ public static function rd_kafka_interceptor_f_on_conf_destroy_t($ic_opaque): int } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $conf const rd_kafka_conf_t* - * @param \FFI\CData|object|string|null $ic_opaque void* - * @param \FFI\CData|null $errstr char* - * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + *

    on_new() is called from rd_kafka_new() prior to returning the newly created client instance to the application.

    + * + * + *
    Warning
    The rk client instance will not be fully set up when this interceptor is called and the interceptor MUST NOT call any other rk-specific APIs than rd_kafka_interceptor_add..().
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param \FFI\CData|null $conf const rd_kafka_conf_t* - The client instance's final configuration. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @param \FFI\CData|null $errstr char* - A human readable error string in case the interceptor fails. + * @param int|null $errstr_size size_t - Maximum space (including \0) in errstr. + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1dd51e3656e61b4a8d4475a3e0deaa10 */ public static function rd_kafka_interceptor_f_on_new_t(?\FFI\CData $rk, ?\FFI\CData $conf, $ic_opaque, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -2613,10 +2967,12 @@ public static function rd_kafka_interceptor_f_on_new_t(?\FFI\CData $rk, ?\FFI\CD } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|object|string|null $ic_opaque void* + *

    on_destroy() is called from rd_kafka_destroy() or (rd_kafka_new() if rd_kafka_new() fails during initialization).

    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). * @return int rd_kafka_resp_err_t * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aaea78073e1a749ee0a8eaedf2f7c21d4 */ public static function rd_kafka_interceptor_f_on_destroy_t(?\FFI\CData $rk, $ic_opaque): int { @@ -2624,11 +2980,19 @@ public static function rd_kafka_interceptor_f_on_destroy_t(?\FFI\CData $rk, $ic_ } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    on_send() is called from rd_kafka_produce*() (et.al) prior to the partitioner being called.

    + * + *
    Remarks
    This interceptor is only used by producer instances.
    + *
    + * The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
    + *
    + * If the partitioner fails or an unknown partition was specified, the on_acknowledgement() interceptor chain will be called from within the rd_kafka_produce*() call to maintain send-acknowledgement symmetry.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - The message being produced. Immutable. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a39c970fa80d4681d93332d3af4df5916 */ public static function rd_kafka_interceptor_f_on_send_t(?\FFI\CData $rk, ?\FFI\CData $rkmessage, $ic_opaque): int { @@ -2636,11 +3000,18 @@ public static function rd_kafka_interceptor_f_on_send_t(?\FFI\CData $rk, ?\FFI\C } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    on_acknowledgement() is called to inform interceptors that a message was successfully delivered or permanently failed delivery. The interceptor chain is called from internal librdkafka background threads, or rd_kafka_produce*() if the partitioner failed.

    + * + *
    Remarks
    This interceptor is only used by producer instances.
    + *
    + * The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
    + *
    Warning
    The on_acknowledgement() method may be called from internal librdkafka threads. An on_acknowledgement() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - The message being produced. Immutable. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa654a84f20567004ab1df85f97f0ebf2 */ public static function rd_kafka_interceptor_f_on_acknowledgement_t(?\FFI\CData $rk, ?\FFI\CData $rkmessage, $ic_opaque): int { @@ -2648,11 +3019,17 @@ public static function rd_kafka_interceptor_f_on_acknowledgement_t(?\FFI\CData $ } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    on_consume() is called just prior to passing the message to the application in rd_kafka_consumer_poll(), rd_kafka_consume*(), the event interface, etc.

    + * + *
    Remarks
    This interceptor is only used by consumer instances.
    + *
    + * The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param \FFI\CData|null $rkmessage rd_kafka_message_t* - The message being consumed. Immutable. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae31db94674004860fa545f3745497cfe */ public static function rd_kafka_interceptor_f_on_consume_t(?\FFI\CData $rk, ?\FFI\CData $rkmessage, $ic_opaque): int { @@ -2660,12 +3037,17 @@ public static function rd_kafka_interceptor_f_on_consume_t(?\FFI\CData $rk, ?\FF } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $offsets const rd_kafka_topic_partition_list_t* - * @param int $err rd_kafka_resp_err_t - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    on_commit() is called on completed or failed offset commit. It is called from internal librdkafka threads.

    + * + *
    Remarks
    This interceptor is only used by consumer instances.
    + *
    Warning
    The on_commit() interceptor is called from internal librdkafka threads. An on_commit() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param \FFI\CData|null $offsets const rd_kafka_topic_partition_list_t* - List of topic+partition+offset+error that were committed. The error message of each partition should be checked for error. + * @param int $err rd_kafka_resp_err_t - The commit error, if any. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a89c817cb2dd1a7b6a414a2f62c01fdcf */ public static function rd_kafka_interceptor_f_on_commit_t(?\FFI\CData $rk, ?\FFI\CData $offsets, int $err, $ic_opaque): int { @@ -2673,17 +3055,21 @@ public static function rd_kafka_interceptor_f_on_commit_t(?\FFI\CData $rk, ?\FFI } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int|null $sockfd int - * @param string|null $brokername const char* - * @param int|null $brokerid int32_t - * @param int|null $ApiKey int16_t - * @param int|null $ApiVersion int16_t - * @param int|null $CorrId int32_t - * @param int|null $size size_t - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t - * @since 1.0.0 of librdkafka + *

    on_request_sent() is called when a request has been fully written to a broker TCP connection's socket.

    + * + *
    Warning
    The on_request_sent() interceptor is called from internal librdkafka broker threads. An on_request_sent() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param int|null $sockfd int - Socket file descriptor. + * @param string|null $brokername const char* - Broker request is being sent to. + * @param int|null $brokerid int32_t - Broker request is being sent to. + * @param int|null $ApiKey int16_t - Kafka protocol request type. + * @param int|null $ApiVersion int16_t - Kafka protocol request type version. + * @param int|null $CorrId int32_t - Kafka protocol request correlation id. + * @param int|null $size size_t - Size of request. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. + * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a0b7f9b39e862802a6ccbac07bf848064 */ public static function rd_kafka_interceptor_f_on_request_sent_t(?\FFI\CData $rk, ?int $sockfd, ?string $brokername, ?int $brokerid, ?int $ApiKey, ?int $ApiVersion, ?int $CorrId, ?int $size, $ic_opaque): int { @@ -2691,12 +3077,14 @@ public static function rd_kafka_interceptor_f_on_request_sent_t(?\FFI\CData $rk, } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_conf_set rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t*)(rd_kafka_conf_t*, const char*, const char*, char*, size_t, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_conf_set() interceptor.

    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration object. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_conf_set rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t*)(rd_kafka_conf_t*, const char*, const char*, char*, size_t, void*) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a97bdeb12b99da10eff18767b3cadbfd2 */ public static function rd_kafka_conf_interceptor_add_on_conf_set(?\FFI\CData $conf, ?string $ic_name, $on_conf_set, $ic_opaque): int { @@ -2704,12 +3092,14 @@ public static function rd_kafka_conf_interceptor_add_on_conf_set(?\FFI\CData $co } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_conf_dup rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t*)(rd_kafka_conf_t*, const rd_kafka_conf_t*, size_t, const char**, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_conf_dup() interceptor.

    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration object. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_conf_dup rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t*)(rd_kafka_conf_t*, const rd_kafka_conf_t*, size_t, const char**, void*) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a375ce703f6247377305dc6a5dfc84429 */ public static function rd_kafka_conf_interceptor_add_on_conf_dup(?\FFI\CData $conf, ?string $ic_name, $on_conf_dup, $ic_opaque): int { @@ -2717,12 +3107,17 @@ public static function rd_kafka_conf_interceptor_add_on_conf_dup(?\FFI\CData $co } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_conf_destroy rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t*)(void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_conf_destroy() interceptor.

    + * + * + *
    Remarks
    Multiple on_conf_destroy() interceptors are allowed to be added to the same configuration object.
    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration object. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_conf_destroy rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t*)(void*) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a919ebd487a2717d6cbdf0d25102ea8ff */ public static function rd_kafka_conf_interceptor_add_on_conf_destroy(?\FFI\CData $conf, ?string $ic_name, $on_conf_destroy, $ic_opaque): int { @@ -2730,12 +3125,18 @@ public static function rd_kafka_conf_interceptor_add_on_conf_destroy(?\FFI\CData } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_new rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t*)(rd_kafka_t*, const rd_kafka_conf_t*, void*, char*, size_t) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_new() interceptor.

    + * + *
    Remarks
    Since the on_new() interceptor is added to the configuration object it may be copied by rd_kafka_conf_dup(). An interceptor implementation must thus be able to handle the same interceptor, ic_opaque tuple to be used by multiple client instances.
    + *
    + * An interceptor plugin should check the return value to make sure it has not already been added.
    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration object. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_new rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t*)(rd_kafka_t*, const rd_kafka_conf_t*, void*, char*, size_t) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a002ef1d350176725b6e9ba93548edf40 */ public static function rd_kafka_conf_interceptor_add_on_new(?\FFI\CData $conf, ?string $ic_name, $on_new, $ic_opaque): int { @@ -2743,12 +3144,14 @@ public static function rd_kafka_conf_interceptor_add_on_new(?\FFI\CData $conf, ? } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_destroy rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_destroy_t*)(rd_kafka_t*, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_destroy() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_destroy rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_destroy_t*)(rd_kafka_t*, void*) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a11b835cb99dc1cfa03c52465b9837a47 */ public static function rd_kafka_interceptor_add_on_destroy(?\FFI\CData $rk, ?string $ic_name, $on_destroy, $ic_opaque): int { @@ -2756,12 +3159,14 @@ public static function rd_kafka_interceptor_add_on_destroy(?\FFI\CData $rk, ?str } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_send rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_send() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_send rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af6fb1b8884444c63b6930a7bd6122cb6 */ public static function rd_kafka_interceptor_add_on_send(?\FFI\CData $rk, ?string $ic_name, $on_send, $ic_opaque): int { @@ -2769,12 +3174,14 @@ public static function rd_kafka_interceptor_add_on_send(?\FFI\CData $rk, ?string } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_acknowledgement rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_acknowledgement() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_acknowledgement rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abaf475ab47fff47ab796be1cecbbd370 */ public static function rd_kafka_interceptor_add_on_acknowledgement(?\FFI\CData $rk, ?string $ic_name, $on_acknowledgement, $ic_opaque): int { @@ -2782,12 +3189,14 @@ public static function rd_kafka_interceptor_add_on_acknowledgement(?\FFI\CData $ } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* - * @param \FFI\CData|\Closure $on_consume rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    Append an on_consume() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. + * @param \FFI\CData|\Closure $on_consume rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer. + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a0e722a9930897bba81b33de5fe95b6ed */ public static function rd_kafka_interceptor_add_on_consume(?\FFI\CData $rk, ?string $ic_name, $on_consume, $ic_opaque): int { @@ -2795,12 +3204,14 @@ public static function rd_kafka_interceptor_add_on_consume(?\FFI\CData $rk, ?str } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* + *

    Append an on_commit() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. * @param \FFI\CData|\Closure $on_commit rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t*)(rd_kafka_t*, const rd_kafka_topic_partition_list_t*, rd_kafka_resp_err_t, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a0d3e29337dbf11762b5a7e492b77b781 */ public static function rd_kafka_interceptor_add_on_commit(?\FFI\CData $rk, ?string $ic_name, $on_commit, $ic_opaque): int { @@ -2808,12 +3219,14 @@ public static function rd_kafka_interceptor_add_on_commit(?\FFI\CData $rk, ?stri } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* + *

    Append an on_request_sent() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. * @param \FFI\CData|\Closure $on_request_sent rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t*)(rd_kafka_t*, int, const char*, int32_t, int16_t, int16_t, int32_t, size_t, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a444032455f98839bef2fb76d15f663c3 */ public static function rd_kafka_interceptor_add_on_request_sent(?\FFI\CData $rk, ?string $ic_name, $on_request_sent, $ic_opaque): int { @@ -2821,9 +3234,11 @@ public static function rd_kafka_interceptor_add_on_request_sent(?\FFI\CData $rk, } /** - * @param \FFI\CData|null $topicres const rd_kafka_topic_result_t* - * @return int rd_kafka_resp_err_t + *

    Topic result provides per-topic operation result information.

    + * @param \FFI\CData|null $topicres const rd_kafka_topic_result_t* - ) + * @return int rd_kafka_resp_err_t - the error code for the given topic result. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a962bb4f4bcd8cf12a9aa9ef361ed7526 */ public static function rd_kafka_topic_result_error(?\FFI\CData $topicres): int { @@ -2831,9 +3246,11 @@ public static function rd_kafka_topic_result_error(?\FFI\CData $topicres): int } /** - * @param \FFI\CData|null $topicres const rd_kafka_topic_result_t* - * @return string|null const char* + *
    Remarks
    lifetime of the returned string is the same as the topicres.
    + * @param \FFI\CData|null $topicres const rd_kafka_topic_result_t* - ) + * @return string|null const char* - the human readable error string for the given topic result, or NULL if there was no error. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1e6596b6138eff2342da6e5c56ce9133 */ public static function rd_kafka_topic_result_error_string(?\FFI\CData $topicres): ?string { @@ -2841,9 +3258,11 @@ public static function rd_kafka_topic_result_error_string(?\FFI\CData $topicres) } /** - * @param \FFI\CData|null $topicres const rd_kafka_topic_result_t* - * @return string|null const char* + *
    Remarks
    lifetime of the returned string is the same as the topicres.
    + * @param \FFI\CData|null $topicres const rd_kafka_topic_result_t* - ) + * @return string|null const char* - the name of the topic for the given topic result. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a4073e9165eda071afc6f2af6a824e967 */ public static function rd_kafka_topic_result_name(?\FFI\CData $topicres): ?string { @@ -2851,10 +3270,15 @@ public static function rd_kafka_topic_result_name(?\FFI\CData $topicres): ?strin } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int $for_api rd_kafka_admin_op_t - * @return \FFI\CData|null rd_kafka_AdminOptions_t* + *

    Create a new AdminOptions object.

    + *
       The options object is not modified by the Admin API request APIs,
    +     *    (e.g. CreateTopics) and may be reused for multiple calls.
    +     * 
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param int $for_api rd_kafka_admin_op_t - Specifies what Admin API this AdminOptions object will be used for, which will enforce what AdminOptions_set_..() calls may be used based on the API, causing unsupported set..() calls to fail. Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement allowing any option to be set, even if the option is not used in a future call to an Admin API method. + * @return \FFI\CData|null rd_kafka_AdminOptions_t* - a new AdminOptions object (which must be freed with rd_kafka_AdminOptions_destroy()), or NULL if for_api was set to an unknown API op type. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a685d10a84d9c230ea81545b280925483 */ public static function rd_kafka_AdminOptions_new(?\FFI\CData $rk, int $for_api): ?\FFI\CData { @@ -2871,12 +3295,17 @@ public static function rd_kafka_AdminOptions_destroy(?\FFI\CData $options): void } /** - * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - * @param int|null $timeout_ms int - * @param \FFI\CData|null $errstr char* + *

    Sets the overall request timeout, including broker lookup, request transmission, operation time on broker, and response.

    + * + * + *
    Remarks
    This option is valid for all Admin API requests.
    + * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - Admin options. + * @param int|null $timeout_ms int - Timeout in milliseconds, use -1 for indefinite timeout. Defaults to socket.timeout.ms. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which case an error string will be written errstr. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9988b9e9984f7c884e11176beac17d62 */ public static function rd_kafka_AdminOptions_set_request_timeout(?\FFI\CData $options, ?int $timeout_ms, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -2884,12 +3313,19 @@ public static function rd_kafka_AdminOptions_set_request_timeout(?\FFI\CData $op } /** - * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - * @param int|null $timeout_ms int - * @param \FFI\CData|null $errstr char* + *

    Sets the broker's operation timeout, such as the timeout for CreateTopics to complete the creation of topics on the controller before returning a result to the application.

    + *

    CreateTopics: values <= 0 will return immediately after triggering topic creation, while > 0 will wait this long for topic creation to propagate in cluster. Default: 60 seconds.

    + *

    DeleteTopics: same semantics as CreateTopics. CreatePartitions: same semantics as CreateTopics.

    + * + * + *
    Remarks
    This option is valid for CreateTopics, DeleteTopics, CreatePartitions, and DeleteRecords.
    + * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - Admin options. + * @param int|null $timeout_ms int - Timeout in milliseconds. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which case an error string will be written errstr. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a708390fc4e94ea64a98009d294116d4c */ public static function rd_kafka_AdminOptions_set_operation_timeout(?\FFI\CData $options, ?int $timeout_ms, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -2897,12 +3333,17 @@ public static function rd_kafka_AdminOptions_set_operation_timeout(?\FFI\CData $ } /** - * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - * @param int|null $true_or_false int - * @param \FFI\CData|null $errstr char* + *

    Tell broker to only validate the request, without performing the requested operation (create topics, etc).

    + * + * + *
    Remarks
    This option is valid for CreateTopics, CreatePartitions, AlterConfigs.
    + * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - Admin options. + * @param int|null $true_or_false int - Defaults to false. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure in which case an error string will be written errstr. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae94b1b0da81e43a4e928eea01952fb37 */ public static function rd_kafka_AdminOptions_set_validate_only(?\FFI\CData $options, ?int $true_or_false, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -2910,12 +3351,21 @@ public static function rd_kafka_AdminOptions_set_validate_only(?\FFI\CData $opti } /** - * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - * @param int|null $broker_id int32_t - * @param \FFI\CData|null $errstr char* + *

    Override what broker the Admin request will be sent to.

    + *

    By default, Admin requests are sent to the controller broker, with the following exceptions:

    + * + * + *
    Remarks
    This API should typically not be used, but serves as a workaround if new resource types are added to the broker that the client does not know where to send.
    + * @param \FFI\CData|null $options rd_kafka_AdminOptions_t* - Admin Options. + * @param int|null $broker_id int32_t - The broker to send the request to. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure in which case an error string will be written errstr. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae4a84f4fb6aaac8e09e186a7245a9727 */ public static function rd_kafka_AdminOptions_set_broker(?\FFI\CData $options, ?int $broker_id, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -2933,13 +3383,15 @@ public static function rd_kafka_AdminOptions_set_opaque(?\FFI\CData $options, $o } /** - * @param string|null $topic const char* - * @param int|null $num_partitions int - * @param int|null $replication_factor int - * @param \FFI\CData|null $errstr char* + *

    Create a new NewTopic object. This object is later passed to rd_kafka_CreateTopics().

    + * @param string|null $topic const char* - Topic name to create. + * @param int|null $num_partitions int - Number of partitions in topic, or -1 to use the broker's default partition count (>= 2.4.0). + * @param int|null $replication_factor int - Default replication factor for the topic's partitions, or -1 to use the broker's default replication factor (>= 2.4.0) or if set_replica_assignment() will be used. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return \FFI\CData|null rd_kafka_NewTopic_t* + * @return \FFI\CData|null rd_kafka_NewTopic_t* - a new allocated NewTopic object, or NULL if the input parameters are invalid. Use rd_kafka_NewTopic_destroy() to free object when done. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9f172cd394d605c9fabca3959e101f8b */ public static function rd_kafka_NewTopic_new(?string $topic, ?int $num_partitions, ?int $replication_factor, ?\FFI\CData $errstr, ?int $errstr_size): ?\FFI\CData { @@ -2966,14 +3418,25 @@ public static function rd_kafka_NewTopic_destroy_array(?\FFI\CData $new_topics, } /** + *

    Set the replica (broker) assignment for partition to the replica set in broker_ids (of broker_id_cnt elements).

    + *
    Remarks
    When this method is used, rd_kafka_NewTopic_new() must have been called with a replication_factor of -1.
    + *
    + * An application must either set the replica assignment for all new partitions, or none.
    + *
    + * If called, this function must be called consecutively for each partition, starting at 0.
    + *
    + * Use rd_kafka_metadata() to retrieve the list of brokers in the cluster.
    + * + *
    See also
    rd_kafka_AdminOptions_set_validate_only()
    * @param \FFI\CData|null $new_topic rd_kafka_NewTopic_t* * @param int|null $partition int32_t * @param \FFI\CData|null $broker_ids int32_t* * @param int|null $broker_id_cnt size_t * @param \FFI\CData|null $errstr char* * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ade3dabb32c5e0ed5edcbc038a9345144 */ public static function rd_kafka_NewTopic_set_replica_assignment(?\FFI\CData $new_topic, ?int $partition, ?\FFI\CData $broker_ids, ?int $broker_id_cnt, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -2981,11 +3444,18 @@ public static function rd_kafka_NewTopic_set_replica_assignment(?\FFI\CData $new } /** + *

    Set (broker-side) topic configuration name/value pair.

    + *
    Remarks
    The name and value are not validated by the client, the validation takes place on the broker.
    + * + *
    See also
    rd_kafka_AdminOptions_set_validate_only()
    + *
    + * http://kafka.apache.org/documentation.html#topicconfigs
    * @param \FFI\CData|null $new_topic rd_kafka_NewTopic_t* * @param string|null $name const char* * @param string|null $value const char* - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a29ae721839f76830e7f7b8ba5df64bed */ public static function rd_kafka_NewTopic_set_config(?\FFI\CData $new_topic, ?string $name, ?string $value): int { @@ -2993,12 +3463,21 @@ public static function rd_kafka_NewTopic_set_config(?\FFI\CData $new_topic, ?str } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $new_topics rd_kafka_NewTopic_t** - * @param int|null $new_topic_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Create topics in cluster as specified by the new_topics array of size new_topic_cnt elements.

    + * + *

    Supported admin options:

    + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATETOPICS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $new_topics rd_kafka_NewTopic_t** - Array of new topics to create. + * @param int|null $new_topic_cnt size_t - Number of elements in new_topics array. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6988b48e1a255fbb2badd6c6e11692f3 */ public static function rd_kafka_CreateTopics(?\FFI\CData $rk, ?\FFI\CData $new_topics, ?int $new_topic_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -3006,10 +3485,13 @@ public static function rd_kafka_CreateTopics(?\FFI\CData $rk, ?\FFI\CData $new_t } /** - * @param \FFI\CData|null $result const rd_kafka_CreateTopics_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of topic results from a CreateTopics result.

    + *

    The returned topics life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_CreateTopics_result_t* - Result to get topics from. + * @param \FFI\CData|null $cntp size_t* - Updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_topic_result_t** * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a49451d78d3c7e5bd23367624c03897ea */ public static function rd_kafka_CreateTopics_result_topics(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -3017,9 +3499,11 @@ public static function rd_kafka_CreateTopics_result_topics(?\FFI\CData $result, } /** - * @param string|null $topic const char* - * @return \FFI\CData|null rd_kafka_DeleteTopic_t* + *

    Create a new DeleteTopic object. This object is later passed to rd_kafka_DeleteTopics().

    + * @param string|null $topic const char* - ) - Topic name to delete. + * @return \FFI\CData|null rd_kafka_DeleteTopic_t* - a new allocated DeleteTopic object. Use rd_kafka_DeleteTopic_destroy() to free object when done. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1e0a19934a857ca8b59ddbe8dbe24b0e */ public static function rd_kafka_DeleteTopic_new(?string $topic): ?\FFI\CData { @@ -3046,12 +3530,16 @@ public static function rd_kafka_DeleteTopic_destroy_array(?\FFI\CData $del_topic } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $del_topics rd_kafka_DeleteTopic_t** - * @param int|null $del_topic_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Delete topics from cluster as specified by the topics array of size topic_cnt elements.

    + * + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETETOPICS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $del_topics rd_kafka_DeleteTopic_t** - Array of topics to delete. + * @param int|null $del_topic_cnt size_t - Number of elements in topics array. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae6f7fd92bb32f14c30bbcd22cbda2b4a */ public static function rd_kafka_DeleteTopics(?\FFI\CData $rk, ?\FFI\CData $del_topics, ?int $del_topic_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -3059,10 +3547,13 @@ public static function rd_kafka_DeleteTopics(?\FFI\CData $rk, ?\FFI\CData $del_t } /** - * @param \FFI\CData|null $result const rd_kafka_DeleteTopics_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of topic results from a DeleteTopics result.

    + *

    The returned topics life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_DeleteTopics_result_t* - Result to get topic results from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_topic_result_t** * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aef97d0923f7e8cec4fbec9866d32614a */ public static function rd_kafka_DeleteTopics_result_topics(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -3070,12 +3561,14 @@ public static function rd_kafka_DeleteTopics_result_topics(?\FFI\CData $result, } /** - * @param string|null $topic const char* - * @param int|null $new_total_cnt size_t - * @param \FFI\CData|null $errstr char* + *

    Create a new NewPartitions. This object is later passed to rd_kafka_CreatePartitions() to increase the number of partitions to new_total_cnt for an existing topic.

    + * @param string|null $topic const char* - Topic name to create more partitions for. + * @param int|null $new_total_cnt size_t - Increase the topic's partition count to this value. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return \FFI\CData|null rd_kafka_NewPartitions_t* + * @return \FFI\CData|null rd_kafka_NewPartitions_t* - a new allocated NewPartitions object, or NULL if the input parameters are invalid. Use rd_kafka_NewPartitions_destroy() to free object when done. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abdda69e9215a7e41d5b97cb7ed0c6bd3 */ public static function rd_kafka_NewPartitions_new(?string $topic, ?int $new_total_cnt, ?\FFI\CData $errstr, ?int $errstr_size): ?\FFI\CData { @@ -3102,14 +3595,25 @@ public static function rd_kafka_NewPartitions_destroy_array(?\FFI\CData $new_par } /** + *

    Set the replica (broker id) assignment for new_partition_idx to the replica set in broker_ids (of broker_id_cnt elements).

    + *
    Remarks
    An application must either set the replica assignment for all new partitions, or none.
    + *
    + * If called, this function must be called consecutively for each new partition being created, where new_partition_idx 0 is the first new partition, 1 is the second, and so on.
    + *
    + * broker_id_cnt should match the topic's replication factor.
    + *
    + * Use rd_kafka_metadata() to retrieve the list of brokers in the cluster.
    + * + *
    See also
    rd_kafka_AdminOptions_set_validate_only()
    * @param \FFI\CData|null $new_parts rd_kafka_NewPartitions_t* * @param int|null $new_partition_idx int32_t * @param \FFI\CData|null $broker_ids int32_t* * @param int|null $broker_id_cnt size_t * @param \FFI\CData|null $errstr char* * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6b38e8b7f3357844a7ef9b31cdc4af3c */ public static function rd_kafka_NewPartitions_set_replica_assignment(?\FFI\CData $new_parts, ?int $new_partition_idx, ?\FFI\CData $broker_ids, ?int $broker_id_cnt, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -3117,12 +3621,21 @@ public static function rd_kafka_NewPartitions_set_replica_assignment(?\FFI\CData } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $new_parts rd_kafka_NewPartitions_t** - * @param int|null $new_parts_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Create additional partitions for the given topics, as specified by the new_parts array of size new_parts_cnt elements.

    + * + *

    Supported admin options:

    + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $new_parts rd_kafka_NewPartitions_t** - Array of topics for which new partitions are to be created. + * @param int|null $new_parts_cnt size_t - Number of elements in new_parts array. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1bed8947475ee683033d7988f0b09378 */ public static function rd_kafka_CreatePartitions(?\FFI\CData $rk, ?\FFI\CData $new_parts, ?int $new_parts_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -3130,10 +3643,13 @@ public static function rd_kafka_CreatePartitions(?\FFI\CData $rk, ?\FFI\CData $n } /** - * @param \FFI\CData|null $result const rd_kafka_CreatePartitions_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of topic results from a CreatePartitions result.

    + *

    The returned topics life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_CreatePartitions_result_t* - Result o get topic results from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_topic_result_t** * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a2c2b415181b4e4d633d40a63ac01d681 */ public static function rd_kafka_CreatePartitions_result_topics(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -3141,9 +3657,11 @@ public static function rd_kafka_CreatePartitions_result_topics(?\FFI\CData $resu } /** - * @param int $confsource rd_kafka_ConfigSource_t - * @return string|null const char* + * + * @param int $confsource rd_kafka_ConfigSource_t - ) + * @return string|null const char* - a string representation of the confsource. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad946f1e500a4ce28ed9378d495586076 */ public static function rd_kafka_ConfigSource_name(int $confsource): ?string { @@ -3151,9 +3669,11 @@ public static function rd_kafka_ConfigSource_name(int $confsource): ?string } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @return string|null const char* + * + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - ) + * @return string|null const char* - the configuration property name * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad68d560b00fdbad3a1e994bc423791f6 */ public static function rd_kafka_ConfigEntry_name(?\FFI\CData $entry): ?string { @@ -3161,9 +3681,11 @@ public static function rd_kafka_ConfigEntry_name(?\FFI\CData $entry): ?string } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @return string|null const char* + * + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - ) + * @return 
string|null const char* - the configuration value, may be NULL for sensitive or unset properties. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a744ffc08e0ad1a86e62fa4d4da956770 */ public static function rd_kafka_ConfigEntry_value(?\FFI\CData $entry): ?string { @@ -3171,9 +3693,11 @@ public static function rd_kafka_ConfigEntry_value(?\FFI\CData $entry): ?string } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @return int rd_kafka_ConfigSource_t + * + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - ) + * @return int rd_kafka_ConfigSource_t - the config source. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aff8550b61387790dad381a6c1129d1b3 */ public static function rd_kafka_ConfigEntry_source(?\FFI\CData $entry): int { @@ -3181,9 +3705,11 @@ public static function rd_kafka_ConfigEntry_source(?\FFI\CData $entry): int } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @return int|null int + *
    Remarks
    Shall only be used on a DescribeConfigs result, otherwise returns -1.
    + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - ) + * @return int|null int - 1 if the config property is read-only on the broker, else 0. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa7474dc1258cceb041491d2b75e96bcb */ public static function rd_kafka_ConfigEntry_is_read_only(?\FFI\CData $entry): ?int { @@ -3191,9 +3717,11 @@ public static function rd_kafka_ConfigEntry_is_read_only(?\FFI\CData $entry): ?i } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @return int|null int + *
    Remarks
    Shall only be used on a DescribeConfigs result, otherwise returns -1.
    + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - ) + * @return int|null int - 1 if the config property is set to its default value on the broker, else 0. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abd5a9f49896cff71788bae6cbd60fed9 */ public static function rd_kafka_ConfigEntry_is_default(?\FFI\CData $entry): ?int { @@ -3201,9 +3729,13 @@ public static function rd_kafka_ConfigEntry_is_default(?\FFI\CData $entry): ?int } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @return int|null int + *
    Remarks
    An application should take care not to include the value of sensitive configuration entries in its output.
    + *
    + * Shall only be used on a DescribeConfigs result, otherwise returns -1.
    + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - ) + * @return int|null int - 1 if the config property contains sensitive information (such as security configuration), else 0. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7cca5d473790759cb59b5fefdc992a02 */ public static function rd_kafka_ConfigEntry_is_sensitive(?\FFI\CData $entry): ?int { @@ -3211,9 +3743,11 @@ public static function rd_kafka_ConfigEntry_is_sensitive(?\FFI\CData $entry): ?i } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @return int|null int + * + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - ) + * @return int|null int - 1 if this entry is a synonym, else 0. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1a4c6d3323beb8546fb9c0f7d13b2abb */ public static function rd_kafka_ConfigEntry_is_synonym(?\FFI\CData $entry): ?int { @@ -3221,10 +3755,14 @@ public static function rd_kafka_ConfigEntry_is_synonym(?\FFI\CData $entry): ?int } /** - * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - * @param \FFI\CData|null $cntp size_t* - * @return \FFI\CData|null const rd_kafka_ConfigEntry_t** + *
    Remarks
    The lifetime of the returned entry is the same as conf.
    + *
    + * Shall only be used on a DescribeConfigs result, otherwise returns NULL.
    + * @param \FFI\CData|null $entry const rd_kafka_ConfigEntry_t* - Entry to get synonyms for. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. + * @return \FFI\CData|null const rd_kafka_ConfigEntry_t** - the synonym config entry array. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8a9e60a1274f1889a6d20d8ac6ffb948 */ public static function rd_kafka_ConfigEntry_synonyms(?\FFI\CData $entry, ?\FFI\CData $cntp): ?\FFI\CData { @@ -3232,9 +3770,11 @@ public static function rd_kafka_ConfigEntry_synonyms(?\FFI\CData $entry, ?\FFI\C } /** - * @param int $restype rd_kafka_ResourceType_t - * @return string|null const char* + * + * @param int $restype rd_kafka_ResourceType_t - ) + * @return string|null const char* - a string representation of the restype * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac026f797b199338b30a684087d2365f8 */ public static function rd_kafka_ResourceType_name(int $restype): ?string { @@ -3242,10 +3782,12 @@ public static function rd_kafka_ResourceType_name(int $restype): ?string } /** - * @param int $restype rd_kafka_ResourceType_t - * @param string|null $resname const char* - * @return \FFI\CData|null rd_kafka_ConfigResource_t* + *

    Create new ConfigResource object.

    + * @param int $restype rd_kafka_ResourceType_t - The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC) + * @param string|null $resname const char* - The resource name (e.g., the topic name) + * @return \FFI\CData|null rd_kafka_ConfigResource_t* - a newly allocated object * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#adedb3f1f695c5e5a90ee03fbdc7f6e4a */ public static function rd_kafka_ConfigResource_new(int $restype, ?string $resname): ?\FFI\CData { @@ -3272,11 +3814,15 @@ public static function rd_kafka_ConfigResource_destroy_array(?\FFI\CData $config } /** - * @param \FFI\CData|null $config rd_kafka_ConfigResource_t* - * @param string|null $name const char* - * @param string|null $value const char* - * @return int rd_kafka_resp_err_t + *

    Set configuration name value pair.

    + * + *

    This will overwrite the current value.

    + * @param \FFI\CData|null $config rd_kafka_ConfigResource_t* - ConfigResource to set config property on. + * @param string|null $name const char* - Configuration name, depends on resource type. + * @param string|null $value const char* - Configuration value, depends on resource type and name. Set to NULL to revert configuration value to default. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource, or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afbc1ffd0a5a938e6a9998365903010e5 */ public static function rd_kafka_ConfigResource_set_config(?\FFI\CData $config, ?string $name, ?string $value): int { @@ -3284,10 +3830,13 @@ public static function rd_kafka_ConfigResource_set_config(?\FFI\CData $config, ? } /** - * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of config entries from a ConfigResource object.

    + *

    The returned object life-times are the same as the config object.

    + * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - ConfigResource to get configs from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_ConfigEntry_t** * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5e95b289deaebe43f7a73874e8428f4d */ public static function rd_kafka_ConfigResource_configs(?\FFI\CData $config, ?\FFI\CData $cntp): ?\FFI\CData { @@ -3295,9 +3844,11 @@ public static function rd_kafka_ConfigResource_configs(?\FFI\CData $config, ?\FF } /** - * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - * @return int rd_kafka_ResourceType_t + * + * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - ) + * @return int rd_kafka_ResourceType_t - the ResourceType for config * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afb2a6e896b74d2f07246ede8269eb8f6 */ public static function rd_kafka_ConfigResource_type(?\FFI\CData $config): int { @@ -3305,9 +3856,11 @@ public static function rd_kafka_ConfigResource_type(?\FFI\CData $config): int } /** - * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - * @return string|null const char* + * + * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - ) + * @return string|null const char* - the name for config * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af70880e690cb45ff728a331e7e40baac */ public static function rd_kafka_ConfigResource_name(?\FFI\CData $config): ?string { @@ -3315,9 +3868,11 @@ public static function rd_kafka_ConfigResource_name(?\FFI\CData $config): ?strin } /** - * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - * @return int rd_kafka_resp_err_t + * + * @param \FFI\CData|null $config const 
rd_kafka_ConfigResource_t* - ) + * @return int rd_kafka_resp_err_t - the error for this resource from an AlterConfigs request * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6a4c8cf9e53472c30c6b1dd78e3ba560 */ public static function rd_kafka_ConfigResource_error(?\FFI\CData $config): int { @@ -3325,9 +3880,11 @@ public static function rd_kafka_ConfigResource_error(?\FFI\CData $config): int } /** - * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - * @return string|null const char* + * + * @param \FFI\CData|null $config const rd_kafka_ConfigResource_t* - ) + * @return string|null const char* - the error string for this resource from an AlterConfigs request, or NULL if no error. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a077c4a83e8b7fcf23057dad5a2af812f */ public static function rd_kafka_ConfigResource_error_string(?\FFI\CData $config): ?string { @@ -3335,12 +3892,17 @@ public static function rd_kafka_ConfigResource_error_string(?\FFI\CData $config) } /** + *

    Update the configuration for the specified resources. Updates are not transactional so they may succeed for a subset of the provided resources while the others fail. The configuration for a particular resource is updated atomically, replacing values using the provided ConfigEntrys and reverting unspecified ConfigEntrys to their default values.

    + *
    Remarks
    Requires broker version >=0.11.0.0
    + *
    Warning
    AlterConfigs will replace all existing configuration for the provided resources with the new configuration given, reverting all other configuration to their default values.
    + *
    Remarks
    Multiple resources and resource types may be set, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource.
    * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|null $configs rd_kafka_ConfigResource_t** * @param int|null $config_cnt size_t * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* * @param \FFI\CData|null $rkqu rd_kafka_queue_t* * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ade8d161dfb86a94179d286f36ec5b28e */ public static function rd_kafka_AlterConfigs(?\FFI\CData $rk, ?\FFI\CData $configs, ?int $config_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -3348,10 +3910,14 @@ public static function rd_kafka_AlterConfigs(?\FFI\CData $rk, ?\FFI\CData $confi } /** - * @param \FFI\CData|null $result const rd_kafka_AlterConfigs_result_t* - * @param \FFI\CData|null $cntp size_t* - * @return \FFI\CData|null const rd_kafka_ConfigResource_t** + *

    Get an array of resource results from a AlterConfigs result.

    + *

    Use rd_kafka_ConfigResource_error() and rd_kafka_ConfigResource_error_string() to extract per-resource error results on the returned array elements.

    + *

    The returned object life-times are the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_AlterConfigs_result_t* - Result object to get resource results from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. + * @return \FFI\CData|null const rd_kafka_ConfigResource_t** - an array of ConfigResource elements, or NULL if not available. * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7a8a2fdf3473111a84e14bcb0a88ef05 */ public static function rd_kafka_AlterConfigs_result_resources(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -3359,12 +3925,21 @@ public static function rd_kafka_AlterConfigs_result_resources(?\FFI\CData $resul } /** + *

    Get configuration for the specified resources in configs.

    + *

    The returned configuration includes default values and the rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source() methods may be used to distinguish them from user supplied values.

    + *

    The value of config entries where rd_kafka_ConfigEntry_is_sensitive() is true will always be NULL to avoid disclosing sensitive information, such as security settings.

    + *

    Configuration entries where rd_kafka_ConfigEntry_is_read_only() is true can't be updated (with rd_kafka_AlterConfigs()).

    + *

    Synonym configuration entries are returned if the broker supports it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms().

    + *
    Remarks
    Requires broker version >=0.11.0.0
    + *
    + * Multiple resources and resource types may be requested, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource.
    * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|null $configs rd_kafka_ConfigResource_t** * @param int|null $config_cnt size_t * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* * @param \FFI\CData|null $rkqu rd_kafka_queue_t* * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac6c5d7be2cd16c6875ddc946c9918df2 */ public static function rd_kafka_DescribeConfigs(?\FFI\CData $rk, ?\FFI\CData $configs, ?int $config_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -3372,10 +3947,13 @@ public static function rd_kafka_DescribeConfigs(?\FFI\CData $rk, ?\FFI\CData $co } /** - * @param \FFI\CData|null $result const rd_kafka_DescribeConfigs_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of resource results from a DescribeConfigs result.

    + *

    The returned resources life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_DescribeConfigs_result_t* - Result object to get resource results from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_ConfigResource_t** * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a1f8eb6562e32b2638d75ca22161ab185 */ public static function rd_kafka_DescribeConfigs_result_resources(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -3383,9 +3961,11 @@ public static function rd_kafka_DescribeConfigs_result_resources(?\FFI\CData $re } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return \FFI\CData|null const rd_kafka_conf_t* + *
    Remarks
    the returned object is read-only and its lifetime is the same as the rd_kafka_t object.
    + * @param \FFI\CData|null $rk rd_kafka_t* - ) + * @return \FFI\CData|null const rd_kafka_conf_t* - the configuration object used by an rd_kafka_t instance. For use with rd_kafka_conf_get(), et.al., to extract configuration properties from a running client. * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a71e233cd19b13572772df2d43074a97d */ public static function rd_kafka_conf(?\FFI\CData $rk): ?\FFI\CData { @@ -3393,9 +3973,23 @@ public static function rd_kafka_conf(?\FFI\CData $rk): ?\FFI\CData } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param \FFI\CData|\Closure $oauthbearer_token_refresh_cb void(*)(rd_kafka_t*, const char*, void*) + *

    Set SASL/OAUTHBEARER token refresh callback in provided conf object.

    + * + *

    The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll() whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, typically based on the configuration defined in sasl.oauthbearer.config.

    + *

    The callback should invoke rd_kafka_oauthbearer_set_token() or rd_kafka_oauthbearer_set_token_failure() to indicate success or failure, respectively.

    + *

    The refresh operation is eventable and may be received via rd_kafka_queue_poll() with an event type of RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH.

    + *

    Note that before any SASL/OAUTHBEARER broker connection can succeed the application must call rd_kafka_oauthbearer_set_token() once – either directly or, more typically, by invoking either rd_kafka_poll(), rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause retrieval of an initial token to occur.

    + *

    Alternatively, the application can enable the SASL queue by calling rd_kafka_conf_enable_sasl_queue() on the configuration object prior to creating the client instance, get the SASL queue with rd_kafka_queue_get_sasl(), and either serve the queue manually by calling rd_kafka_queue_poll(), or redirecting the queue to the background thread to have the queue served automatically. For the latter case the SASL queue must be forwarded to the background queue with rd_kafka_queue_forward(). A convenience function is available to automatically forward the SASL queue to librdkafka's background thread, see rd_kafka_sasl_background_callbacks_enable().

    + *

    An unsecured JWT refresh handler is provided by librdkafka for development and testing purposes, it is enabled by setting the enable.sasl.oauthbearer.unsecure.jwt property to true and is mutually exclusive to using a refresh callback.

    + *
    See also
    rd_kafka_sasl_background_callbacks_enable()
    + *
    + * rd_kafka_queue_get_sasl()
    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - the configuration to mutate. + * @param \FFI\CData|\Closure $oauthbearer_token_refresh_cb void(*)(rd_kafka_t*, const char*, void*) - the callback to set; callback function arguments:
    + * rk - Kafka handle
    + * oauthbearer_config - Value of configuration property sasl.oauthbearer.config. opaque - Application-provided opaque set via rd_kafka_conf_set_opaque() * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a988395722598f63396d7a1bedb22adaf */ public static function rd_kafka_conf_set_oauthbearer_token_refresh_cb(?\FFI\CData $conf, $oauthbearer_token_refresh_cb): void { @@ -3403,10 +3997,20 @@ public static function rd_kafka_conf_set_oauthbearer_token_refresh_cb(?\FFI\CDat } /** + *

    Sets the verification callback of the broker certificate.

    + *

    The verification callback is triggered from internal librdkafka threads upon connecting to a broker. On each connection attempt the callback will be called for each certificate in the broker's certificate chain, starting at the root certification, as long as the application callback returns 1 (valid certificate). broker_name and broker_id correspond to the broker the connection is being made to. The x509_error argument indicates if OpenSSL's verification of the certificate succeed (0) or failed (an OpenSSL error code). The application may set the SSL context error code by returning 0 from the verify callback and providing a non-zero SSL context error code in x509_error. If the verify callback sets x509_error to 0, returns 1, and the original x509_error was non-zero, the error on the SSL context will be cleared. x509_error is always a valid pointer to an int.

    + *

    depth is the depth of the current certificate in the chain, starting at the root certificate.

    + *

    The certificate itself is passed in binary DER format in buf of size size.

    + *

    The callback must return 1 if verification succeeds, or 0 if verification fails and then write a human-readable error message to errstr (limited to errstr_size bytes, including nul-term).

    + *

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    + * + *
    Warning
    This callback will be called from internal librdkafka threads.
    + *
    Remarks
    See <openssl/x509_vfy.h> in the OpenSSL source distribution for a list of x509_error codes.
    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param \FFI\CData|\Closure $ssl_cert_verify_cb int(*)(rd_kafka_t*, const char*, int32_t, int*, int, const char*, size_t, char*, size_t, void*) - * @return int rd_kafka_conf_res_t + * @return int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if SSL is supported in this build, else RD_KAFKA_CONF_INVALID. * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae0b29e178bc565c9b8b3d5dfc60c18c5 */ public static function rd_kafka_conf_set_ssl_cert_verify_cb(?\FFI\CData $conf, $ssl_cert_verify_cb): int { @@ -3414,15 +4018,28 @@ public static function rd_kafka_conf_set_ssl_cert_verify_cb(?\FFI\CData $conf, $ } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param int $cert_type rd_kafka_cert_type_t - * @param int $cert_enc rd_kafka_cert_enc_t - * @param \FFI\CData|object|string|null $buffer const void* - * @param int|null $size size_t - * @param \FFI\CData|null $errstr char* - * @param int|null $errstr_size size_t - * @return int rd_kafka_conf_res_t + *

    Set certificate/key cert_type from the cert_enc encoded memory at buffer of size bytes.

    + * + * + *
    Remarks
    Calling this method multiple times with the same cert_type will replace the previous value.
    + *
    + * Calling this method with buffer set to NULL will clear the configuration for cert_type.
    + *
    + * The private key may require a password, which must be specified with the ssl.key.password configuration property prior to calling this function.
    + *
    + * Private and public keys in PEM format may also be set with the ssl.key.pem and ssl.certificate.pem configuration properties.
    + *
    + * CA certificate in PEM format may also be set with the ssl.ca.pem configuration property.
    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration object. + * @param int $cert_type rd_kafka_cert_type_t - Certificate or key type to configure. + * @param int $cert_enc rd_kafka_cert_enc_t - Buffer encoding type. + * @param \FFI\CData|object|string|null $buffer const void* - Memory pointer to encoded certificate or key. The memory is not referenced after this function returns. + * @param int|null $size size_t - Size of memory at buffer. + * @param \FFI\CData|null $errstr char* - Memory where a human-readable error string will be written on failure. + * @param int|null $errstr_size size_t - Size of errstr, including space for nul-terminator. + * @return int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the memory in buffer is of incorrect encoding, or if librdkafka was not built with SSL support. * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a55fa4d223e6214b7be94a87f9985177a */ public static function rd_kafka_conf_set_ssl_cert(?\FFI\CData $conf, int $cert_type, int $cert_enc, $buffer, ?int $size, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -3430,9 +4047,14 @@ public static function rd_kafka_conf_set_ssl_cert(?\FFI\CData $conf, int $cert_t } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return string|null const char* + *

    The returned memory is read-only and its lifetime is the same as the event object.

    + *

    Event types:

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return string|null const char* - the associated configuration string for the event, or NULL if the configuration property is not set or if not applicable for the given event type. * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7e1534cd509d7b233ba4b3d2efea5278 */ public static function rd_kafka_event_config_string(?\FFI\CData $rkev): ?string { @@ -3440,16 +4062,28 @@ public static function rd_kafka_event_config_string(?\FFI\CData $rkev): ?string } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $token_value const char* - * @param int|null $md_lifetime_ms int64_t - * @param string|null $md_principal_name const char* - * @param \FFI\CData|null $extensions const char** - * @param int|null $extension_size size_t - * @param \FFI\CData|null $errstr char* + *

    Set SASL/OAUTHBEARER token and metadata.

    + * + *

    The SASL/OAUTHBEARER token refresh callback or event handler should invoke this method upon success. The extension keys must not include the reserved key "`auth`", and all extension keys and values must conform to the required format as per https://tools.ietf.org/html/rfc7628#section-3.1:

    key            = 1*(ALPHA)
    +     * value          = *(VCHAR / SP / HTAB / CR / LF )
    +     * 
    + *
    See also
    rd_kafka_oauthbearer_set_token_failure
    + *
    + * rd_kafka_conf_set_oauthbearer_token_refresh_cb
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $token_value const char* - the mandatory token value to set, often (but not necessarily) a JWS compact serialization as per https://tools.ietf.org/html/rfc7515#section-3.1. + * @param int|null $md_lifetime_ms int64_t - when the token expires, in terms of the number of milliseconds since the epoch. + * @param string|null $md_principal_name const char* - the mandatory Kafka principal name associated with the token. + * @param \FFI\CData|null $extensions const char** - optional SASL extensions key-value array with extensions_size elements (number of keys * 2), where [i] is the key and [i+1] is the key's value, to be communicated to the broker as additional key-value pairs during the initial client response as per https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are copied. + * @param int|null $extension_size size_t - the number of SASL extension keys plus values, which must be a non-negative multiple of 2. + * @param \FFI\CData|null $errstr char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr. * @param int|null $errstr_size size_t - * @return int rd_kafka_resp_err_t + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise errstr set and:
    + * RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are invalid;
    + * RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not supported by this build;
    + * RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.
    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a16d208d8ccfeee3d7084402ae24b641f */ public static function rd_kafka_oauthbearer_set_token(?\FFI\CData $rk, ?string $token_value, ?int $md_lifetime_ms, ?string $md_principal_name, ?\FFI\CData $extensions, ?int $extension_size, ?\FFI\CData $errstr, ?int $errstr_size): int { @@ -3457,10 +4091,21 @@ public static function rd_kafka_oauthbearer_set_token(?\FFI\CData $rk, ?string $ } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $errstr const char* - * @return int rd_kafka_resp_err_t + *

    SASL/OAUTHBEARER token refresh failure indicator.

    + * + *

    The SASL/OAUTHBEARER token refresh callback or event handler should invoke this method upon failure.

    + * + *
    See also
    rd_kafka_oauthbearer_set_token
    + *
    + * rd_kafka_conf_set_oauthbearer_token_refresh_cb
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $errstr const char* - mandatory human readable error reason for failing to acquire a token. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:
    + * RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not supported by this build;
    + * RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism,
    + * RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied. * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5a88c547172a961cea4c854f01740ce8 */ public static function rd_kafka_oauthbearer_set_token_failure(?\FFI\CData $rk, ?string $errstr): int { @@ -3468,12 +4113,16 @@ public static function rd_kafka_oauthbearer_set_token_failure(?\FFI\CData $rk, ? } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int $thread_type rd_kafka_thread_type_t - * @param string|null $thread_name const char* - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    on_thread_start() is called from a newly created librdkafka-managed thread.

    + * + *
    Warning
    The on_thread_start() interceptor is called from internal librdkafka threads. An on_thread_start() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param int $thread_type rd_kafka_thread_type_t - Thread type. + * @param string|null $thread_name const char* - Human-readable thread name, may not be unique. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa882b123107dd0fecca114dea26a4c18 */ public static function rd_kafka_interceptor_f_on_thread_start_t(?\FFI\CData $rk, int $thread_type, ?string $thread_name, $ic_opaque): int { @@ -3481,12 +4130,17 @@ public static function rd_kafka_interceptor_f_on_thread_start_t(?\FFI\CData $rk, } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int $thread_type rd_kafka_thread_type_t - * @param string|null $thread_name const char* - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    on_thread_exit() is called just prior to a librdkafka-managed thread exiting from the exiting thread itself.

    + * + *
    Remarks
    Depending on the thread type, librdkafka may execute additional code on the thread after on_thread_exit() returns.
    + *
    Warning
    The on_thread_exit() interceptor is called from internal librdkafka threads. An on_thread_exit() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param int $thread_type rd_kafka_thread_type_t - Thread type. + * @param string|null $thread_name const char* - Human-readable thread name, may not be unique. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae89a2054959ba519c3851889ee69576c */ public static function rd_kafka_interceptor_f_on_thread_exit_t(?\FFI\CData $rk, int $thread_type, ?string $thread_name, $ic_opaque): int { @@ -3494,12 +4148,14 @@ public static function rd_kafka_interceptor_f_on_thread_exit_t(?\FFI\CData $rk, } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* + *

    Append an on_thread_start() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. * @param \FFI\CData|\Closure $on_thread_start rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t*)(rd_kafka_t*, rd_kafka_thread_type_t, const char*, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf. * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa46a3bd81acc3861979f43b63f7ea1c8 */ public static function rd_kafka_interceptor_add_on_thread_start(?\FFI\CData $rk, ?string $ic_name, $on_thread_start, $ic_opaque): int { @@ -3507,12 +4163,14 @@ public static function rd_kafka_interceptor_add_on_thread_start(?\FFI\CData $rk, } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* + *

    Append an on_thread_exit() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. * @param \FFI\CData|\Closure $on_thread_exit rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t*)(rd_kafka_t*, rd_kafka_thread_type_t, const char*, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a876a95f3d989ad7cbff048e3449674e6 */ public static function rd_kafka_interceptor_add_on_thread_exit(?\FFI\CData $rk, ?string $ic_name, $on_thread_exit, $ic_opaque): int { @@ -3635,9 +4293,11 @@ public static function rd_kafka_mock_broker_set_rack(?\FFI\CData $mcluster, ?int } /** - * @param \FFI\CData|null $error const rd_kafka_error_t* - * @return int rd_kafka_resp_err_t + * + * @param \FFI\CData|null $error const rd_kafka_error_t* - ) + * @return int rd_kafka_resp_err_t - the error code for error or RD_KAFKA_RESP_ERR_NO_ERROR if error is NULL. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6b4f430e5cc4f8b89ec23aaa5063b739 */ public static function rd_kafka_error_code(?\FFI\CData $error): int { @@ -3645,9 +4305,12 @@ public static function rd_kafka_error_code(?\FFI\CData $error): int } /** - * @param \FFI\CData|null $error const rd_kafka_error_t* - * @return string|null const char* + *
    Remarks
    The lifetime of the returned pointer is the same as the error object.
    + *
    See also
    rd_kafka_err2name()
    + * @param \FFI\CData|null $error const rd_kafka_error_t* - ) + * @return string|null const char* - the error code name for error, e.g, "ERR_UNKNOWN_MEMBER_ID", or an empty string if error is NULL. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3b32c027854efbf7f52392019a15bc67 */ public static function rd_kafka_error_name(?\FFI\CData $error): ?string { @@ -3655,9 +4318,11 @@ public static function rd_kafka_error_name(?\FFI\CData $error): ?string } /** - * @param \FFI\CData|null $error const rd_kafka_error_t* - * @return string|null const char* + *
    Remarks
    The lifetime of the returned pointer is the same as the error object.
    + * @param \FFI\CData|null $error const rd_kafka_error_t* - ) + * @return string|null const char* - a human readable error string for error, or an empty string if error is NULL. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aea75812786d33b35175b3764e7a342df */ public static function rd_kafka_error_string(?\FFI\CData $error): ?string { @@ -3665,9 +4330,11 @@ public static function rd_kafka_error_string(?\FFI\CData $error): ?string } /** - * @param \FFI\CData|null $error const rd_kafka_error_t* - * @return int|null int + * + * @param \FFI\CData|null $error const rd_kafka_error_t* - ) + * @return int|null int - 1 if the error is a fatal error, indicating that the client instance is no longer usable, else 0 (also if error is NULL). * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a97ba623d3eadd7f6b64800c31e28b2d2 */ public static function rd_kafka_error_is_fatal(?\FFI\CData $error): ?int { @@ -3675,9 +4342,11 @@ public static function rd_kafka_error_is_fatal(?\FFI\CData $error): ?int } /** - * @param \FFI\CData|null $error const rd_kafka_error_t* - * @return int|null int + * + * @param \FFI\CData|null $error const rd_kafka_error_t* - ) + * @return int|null int - 1 if the operation may be retried, else 0 (also if error is NULL). * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acc4ee1ecb953e1a409528a99ab0c5451 */ public static function rd_kafka_error_is_retriable(?\FFI\CData $error): ?int { @@ -3685,9 +4354,11 @@ public static function rd_kafka_error_is_retriable(?\FFI\CData $error): ?int } /** - * @param \FFI\CData|null $error const rd_kafka_error_t* - * @return int|null int + *
    Remarks
    The return value of this method is only valid for errors returned by the transactional API.
    + * @param \FFI\CData|null $error const rd_kafka_error_t* - ) + * @return int|null int - 1 if the error is an abortable transaction error in which case the application must call rd_kafka_abort_transaction() and start a new transaction with rd_kafka_begin_transaction() if it wishes to proceed with transactions. Else returns 0 (also if error is NULL). * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab07a417d941737db523808dec1c488dd */ public static function rd_kafka_error_txn_requires_abort(?\FFI\CData $error): ?int { @@ -3695,8 +4366,11 @@ public static function rd_kafka_error_txn_requires_abort(?\FFI\CData $error): ?i } /** - * @param \FFI\CData|null $error rd_kafka_error_t* + *

    Free and destroy an error object.

    + *
    Remarks
    As a convenience it is permitted to pass a NULL error.
    + * @param \FFI\CData|null $error rd_kafka_error_t* - ) * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3090ec84b9a8aa910d11f81f555d04da */ public static function rd_kafka_error_destroy(?\FFI\CData $error): void { @@ -3704,11 +4378,15 @@ public static function rd_kafka_error_destroy(?\FFI\CData $error): void } /** + *

    Create a new error object with error code and optional human readable error string in fmt.

    + *

    This method is mainly to be used for mocking errors in application test code.

    + *

    The returned object must be destroyed with rd_kafka_error_destroy().

    * @param int $code rd_kafka_resp_err_t * @param string|null $fmt const char* * @param mixed ...$args * @return \FFI\CData|null rd_kafka_error_t* * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afd0d2df97ab446896ccf4ea87f709809 */ public static function rd_kafka_error_new(int $code, ?string $fmt, ...$args): ?\FFI\CData { @@ -3716,14 +4394,18 @@ public static function rd_kafka_error_new(int $code, ?string $fmt, ...$args): ?\ } /** + *

    FNV-1a partitioner.

    + *

    Uses consistent hashing to map identical keys onto identical partitions using FNV-1a hashing.

    + *

    The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

    * @param \FFI\CData|null $rkt const rd_kafka_topic_t* * @param \FFI\CData|object|string|null $key const void* * @param int|null $keylen size_t * @param int|null $partition_cnt int32_t * @param \FFI\CData|object|string|null $rkt_opaque void* * @param \FFI\CData|object|string|null $msg_opaque void* - * @return int|null int32_t + * @return int|null int32_t - a partition between 0 and partition_cnt - 1. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abf216e03c718df36bb37e1affbc8c8f7 */ public static function rd_kafka_msg_partitioner_fnv1a(?\FFI\CData $rkt, $key, ?int $keylen, ?int $partition_cnt, $rkt_opaque, $msg_opaque): ?int { @@ -3731,14 +4413,18 @@ public static function rd_kafka_msg_partitioner_fnv1a(?\FFI\CData $rkt, $key, ?i } /** + *

    Consistent-Random FNV-1a partitioner.

    + *

    Uses consistent hashing to map identical keys onto identical partitions using FNV-1a hashing. Messages without keys will be assigned via the random partitioner.

    + *

    The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

    * @param \FFI\CData|null $rkt const rd_kafka_topic_t* * @param \FFI\CData|object|string|null $key const void* * @param int|null $keylen size_t * @param int|null $partition_cnt int32_t * @param \FFI\CData|object|string|null $rkt_opaque void* * @param \FFI\CData|object|string|null $msg_opaque void* - * @return int|null int32_t + * @return int|null int32_t - a partition between 0 and partition_cnt - 1. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac6bbf5fabd7def78d9d0677af2d383b0 */ public static function rd_kafka_msg_partitioner_fnv1a_random(?\FFI\CData $rkt, $key, ?int $keylen, ?int $partition_cnt, $rkt_opaque, $msg_opaque): ?int { @@ -3746,9 +4432,12 @@ public static function rd_kafka_msg_partitioner_fnv1a_random(?\FFI\CData $rkt, $ } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return \FFI\CData|null rd_kafka_consumer_group_metadata_t* + *
    Remarks
    The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
    + *
    See also
    rd_kafka_send_offsets_to_transaction()
    + * @param \FFI\CData|null $rk rd_kafka_t* - ) + * @return \FFI\CData|null rd_kafka_consumer_group_metadata_t* - the current consumer group metadata associated with this consumer, or NULL if rk is not a consumer configured with a group.id. This metadata object should be passed to the transactional producer's rd_kafka_send_offsets_to_transaction() API. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a167fe52efc68d470291bf17757b62c2e */ public static function rd_kafka_consumer_group_metadata(?\FFI\CData $rk): ?\FFI\CData { @@ -3756,9 +4445,13 @@ public static function rd_kafka_consumer_group_metadata(?\FFI\CData $rk): ?\FFI\ } /** - * @param string|null $group_id const char* + *

    Create a new consumer group metadata object. This is typically only used for writing tests.

    + * + *
    Remarks
    The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
    + * @param string|null $group_id const char* - ) - The group id. * @return \FFI\CData|null rd_kafka_consumer_group_metadata_t* * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a30dc0292d9f172832451905fd6c28b22 */ public static function rd_kafka_consumer_group_metadata_new(?string $group_id): ?\FFI\CData { @@ -3775,11 +4468,17 @@ public static function rd_kafka_consumer_group_metadata_destroy(?\FFI\CData $arg } /** - * @param \FFI\CData|null $cgmd const rd_kafka_consumer_group_metadata_t* - * @param \FFI\CData|object|string|null $bufferp void** - * @param \FFI\CData|null $sizep size_t* - * @return \FFI\CData|null rd_kafka_error_t* + *

    Serialize the consumer group metadata to a binary format. This is mainly for client binding use and not for application use.

    + *
    Remarks
    The serialized metadata format is private and is not compatible across different versions or even builds of librdkafka. It should only be used in the same process runtime and must only be passed to rd_kafka_consumer_group_metadata_read().
    + * + * + *
    See also
    rd_kafka_consumer_group_metadata_read()
    + * @param \FFI\CData|null $cgmd const rd_kafka_consumer_group_metadata_t* - Metadata to be serialized. + * @param \FFI\CData|object|string|null $bufferp void** - On success this pointer will be updated to point to an allocated buffer containing the serialized metadata. The buffer must be freed with rd_kafka_mem_free(). + * @param \FFI\CData|null $sizep size_t* - The pointed to size will be updated with the size of the serialized buffer. + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aef280384f33889a4dfe02d92186f5928 */ public static function rd_kafka_consumer_group_metadata_write(?\FFI\CData $cgmd, $bufferp, ?\FFI\CData $sizep): ?\FFI\CData { @@ -3787,11 +4486,17 @@ public static function rd_kafka_consumer_group_metadata_write(?\FFI\CData $cgmd, } /** - * @param \FFI\CData|null $cgmdp rd_kafka_consumer_group_metadata_t** - * @param \FFI\CData|object|string|null $buffer const void* - * @param int|null $size size_t - * @return \FFI\CData|null rd_kafka_error_t* + *

    Reads serialized consumer group metadata and returns a consumer group metadata object. This is mainly for client binding use and not for application use.

    + *
    Remarks
    The serialized metadata format is private and is not compatible across different versions or even builds of librdkafka. It should only be used in the same process runtime and must only be passed to rd_kafka_consumer_group_metadata_read().
    + * + * + *
    See also
    rd_kafka_consumer_group_metadata_write()
    + * @param \FFI\CData|null $cgmdp rd_kafka_consumer_group_metadata_t** - On success this pointer will be updated to point to a new consumer group metadata object which must be freed with rd_kafka_consumer_group_metadata_destroy(). + * @param \FFI\CData|object|string|null $buffer const void* - Pointer to the serialized data. + * @param int|null $size size_t - Size of the serialized data. + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad3719a32498a270c04a48d64f2f301b3 */ public static function rd_kafka_consumer_group_metadata_read(?\FFI\CData $cgmdp, $buffer, ?int $size): ?\FFI\CData { @@ -3799,10 +4504,19 @@ public static function rd_kafka_consumer_group_metadata_read(?\FFI\CData $cgmdp, } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int|null $timeout_ms int - * @return \FFI\CData|null rd_kafka_error_t* + *

    Initialize transactions for the producer instance.

    + *

    This function ensures any transactions initiated by previous instances of the producer with the same transactional.id are completed. If the previous instance failed with a transaction in progress the previous transaction will be aborted. This function needs to be called before any other transactional or produce functions are called when the transactional.id is configured.

    + *

    If the last transaction had begun completion (following transaction commit) but not yet finished, this function will await the previous transaction's completion.

    + *

    When any previous transactions have been fenced this function will acquire the internal producer id and epoch, used in all future transactional messages issued by this producer instance.

    + * + *
    Remarks
    This function may block up to timeout_ms milliseconds.
    + * + *
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + * @param \FFI\CData|null $rk rd_kafka_t* - Producer instance. + * @param int|null $timeout_ms int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call init_transactions() again. + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator could be not be contacted within timeout_ms (retriable), RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction coordinator is not available (retriable), RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction would not complete within timeout_ms (retriable), RD_KAFKA_RESP_ERR__STATE if transactions have already been started or upon fatal error, RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not support transactions (<Apache Kafka 0.11), this also raises a fatal error, RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT if the configured transaction.timeout.ms is outside the broker-configured range, this also raises a fatal error, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, or timeout_ms is out of range. Other error codes not listed here may be returned, depending on broker version. 
* @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abf3e48791cf53ac2c7e30a17ce896f74 */ public static function rd_kafka_init_transactions(?\FFI\CData $rk, ?int $timeout_ms): ?\FFI\CData { @@ -3810,9 +4524,25 @@ public static function rd_kafka_init_transactions(?\FFI\CData $rk, ?int $timeout } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return \FFI\CData|null rd_kafka_error_t* + *

    Begin a new transaction.

    + *

    rd_kafka_init_transactions() must have been called successfully (once) before this function is called.

    + *

    Upon successful return from this function the application has to perform at least one of the following operations within transaction.timeout.ms to avoid timing out the transaction on the broker:

    + *

    Any messages produced, offsets sent (rd_kafka_send_offsets_to_transaction()), etc, after the successful return of this function will be part of the transaction and committed or aborted atomically.

    + *

    Finish the transaction by calling rd_kafka_commit_transaction() or abort the transaction by calling rd_kafka_abort_transaction().

    + * + * + *
    Remarks
    With the transactional producer, rd_kafka_produce(), rd_kafka_producev(), et.al, are only allowed during an on-going transaction, as started with this function. Any produce call outside an on-going transaction, or for a failed transaction, will fail.
    + *
    + * The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + * @param \FFI\CData|null $rk rd_kafka_t* - ) - Producer instance. + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__STATE if a transaction is already in progress or upon fatal error, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance. Other error codes not listed here may be returned, depending on broker version. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3c5338866b641957e9eeeaa9b60f5f9c */ public static function rd_kafka_begin_transaction(?\FFI\CData $rk): ?\FFI\CData { @@ -3820,12 +4550,24 @@ public static function rd_kafka_begin_transaction(?\FFI\CData $rk): ?\FFI\CData } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $offsets const rd_kafka_topic_partition_list_t* - * @param \FFI\CData|null $cgmetadata const rd_kafka_consumer_group_metadata_t* - * @param int|null $timeout_ms int - * @return \FFI\CData|null rd_kafka_error_t* + *

    Sends a list of topic partition offsets to the consumer group coordinator for cgmetadata, and marks the offsets as part of the current transaction. These offsets will be considered committed only if the transaction is committed successfully.

    + *

    The offsets should be the next message your application will consume, i.e., the last processed message's offset + 1 for each partition. Either track the offsets manually during processing or use rd_kafka_position() (on the consumer) to get the current offsets for the partitions assigned to the consumer.

    + *

    Use this method at the end of a consume-transform-produce loop prior to committing the transaction with rd_kafka_commit_transaction().

    + * + *
    Remarks
    This function must be called on the transactional producer instance, not the consumer.
    + *
    + * The consumer must disable auto commits (set enable.auto.commit to false on the consumer).
    + *
    + * Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in offsets will be ignored, if there are no valid offsets in offsets the function will return RD_KAFKA_RESP_ERR_NO_ERROR and no action will be taken.
    + * + *
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + * @param \FFI\CData|null $rk rd_kafka_t* - Producer instance. + * @param \FFI\CData|null $offsets const rd_kafka_topic_partition_list_t* - List of offsets to commit to the consumer group upon successful commit of the transaction. Offsets should be the next message to consume, e.g., last processed message + 1. + * @param \FFI\CData|null $cgmetadata const rd_kafka_consumer_group_metadata_t* - The current consumer group metadata as returned by rd_kafka_consumer_group_metadata() on the consumer instance the provided offsets were consumed from. + * @param int|null $timeout_ms int - Maximum time allowed to register the offsets on the broker. + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether an abortable or fatal error has been raised by calling rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal() respectively. Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED if the producer is not authorized to write the consumer offsets to the group coordinator, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, or if the consumer_group_id or offsets are empty, RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous rd_kafka_send_offsets_to_transaction() call is still in progress. Other error codes not listed here may be returned, depending on broker version. 
* @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a288b31742a707ca14cf07312fbcca881 */ public static function rd_kafka_send_offsets_to_transaction(?\FFI\CData $rk, ?\FFI\CData $offsets, ?\FFI\CData $cgmetadata, ?int $timeout_ms): ?\FFI\CData { @@ -3833,10 +4575,22 @@ public static function rd_kafka_send_offsets_to_transaction(?\FFI\CData $rk, ?\F } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int|null $timeout_ms int - * @return \FFI\CData|null rd_kafka_error_t* + *

    Commit the current transaction (as started with rd_kafka_begin_transaction()).

    + *

    Any outstanding messages will be flushed (delivered) before actually committing the transaction.

    + *

    If any of the outstanding messages fail permanently the current transaction will enter the abortable error state and this function will return an abortable error, in this case the application must call rd_kafka_abort_transaction() before attempting a new transaction with rd_kafka_begin_transaction().

    + * + *
    Remarks
    It is strongly recommended to always pass -1 (remaining transaction time) as the timeout_ms. Using other values risk internal state desynchronization in case any of the underlying protocol requests fail.
    + *
    + * This function will block until all outstanding messages are delivered and the transaction commit request has been successfully handled by the transaction coordinator, or until timeout_ms expires, which ever comes first. On timeout the application may call the function again.
    + *
    + * Will automatically call rd_kafka_flush() to ensure all queued messages are delivered before attempting to commit the transaction. If the application has enabled RD_KAFKA_EVENT_DR it must serve the event queue in a separate thread since rd_kafka_flush() will not serve delivery reports in this mode.
    + * + *
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + * @param \FFI\CData|null $rk rd_kafka_t* - Producer instance. + * @param int|null $timeout_ms int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call this function again. Pass -1 to use the remaining transaction timeout, this is the recommended use. + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether an abortable or fatal error has been raised by calling rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal() respectively. Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be complete commmitted within timeout_ms, this is a retriable error as the commit continues in the background, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, Other error codes not listed here may be returned, depending on broker version. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a63a2d72bf9ba444bcea457bf47c4642a */ public static function rd_kafka_commit_transaction(?\FFI\CData $rk, ?int $timeout_ms): ?\FFI\CData { @@ -3844,10 +4598,24 @@ public static function rd_kafka_commit_transaction(?\FFI\CData $rk, ?int $timeou } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int|null $timeout_ms int - * @return \FFI\CData|null rd_kafka_error_t* + *

    Aborts the ongoing transaction.

    + *
       This function should also be used to recover from non-fatal abortable
    +     *    transaction errors.
    +     *
    +     *    Any outstanding messages will be purged and fail with
    +     *    RD_KAFKA_RESP_ERR__PURGE_INFLIGHT or RD_KAFKA_RESP_ERR__PURGE_QUEUE.
    +     *    See rd_kafka_purge() for details.
    +     * 
    + *
    Remarks
    It is strongly recommended to always pass -1 (remaining transaction time) as the timeout_ms. Using other values risk internal state desynchronization in case any of the underlying protocol requests fail.
    + *
    + * This function will block until all outstanding messages are purged and the transaction abort request has been successfully handled by the transaction coordinator, or until timeout_ms expires, which ever comes first. On timeout the application may call the function again. If the application has enabled RD_KAFKA_EVENT_DR it must serve the event queue in a separate thread since rd_kafka_flush() will not serve delivery reports in this mode.
    + * + *
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + * @param \FFI\CData|null $rk rd_kafka_t* - Producer instance. + * @param int|null $timeout_ms int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call this function again. Pass -1 to use the remaining transaction timeout, this is the recommended use. + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be complete commmitted within timeout_ms, this is a retriable error as the commit continues in the background, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, Other error codes not listed here may be returned, depending on broker version. * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a2e50d1043e1b16ed28ba6e372aa0909d */ public static function rd_kafka_abort_transaction(?\FFI\CData $rk, ?int $timeout_ms): ?\FFI\CData { @@ -3938,9 +4706,12 @@ public static function rd_kafka_mock_broker_set_rtt(?\FFI\CData $mcluster, ?int } /** - * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* + *

    Returns the error string for an errored rd_kafka_message_t or NULL if there was no error.

    + *
    Remarks
    This function MUST NOT be used with the producer.
    + * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* - ) * @return string|null const char* * @since 1.5.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5908f68997a5bc42695cff2a703618dd */ public static function rd_kafka_message_errstr(?\FFI\CData $rkmessage): ?string { @@ -3948,9 +4719,11 @@ public static function rd_kafka_message_errstr(?\FFI\CData $rkmessage): ?string } /** - * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* - * @return int|null int32_t + *

    Returns the broker id of the broker the message was produced to or fetched from.

    + * @param \FFI\CData|null $rkmessage const rd_kafka_message_t* - ) + * @return int|null int32_t - a broker id if known, else -1. * @since 1.5.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa3ea50bbfa8d7fce4de91a34b1377b52 */ public static function rd_kafka_message_broker_id(?\FFI\CData $rkmessage): ?int { @@ -3958,11 +4731,16 @@ public static function rd_kafka_message_broker_id(?\FFI\CData $rkmessage): ?int } /** + *

    Produce and send a single message to broker.

    + *

    The message is defined by an array of rd_kafka_vu_t of count cnt.

    + * + *
    See also
    rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END
    * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|null $vus const rd_kafka_vu_t* * @param int|null $cnt size_t - * @return \FFI\CData|null rd_kafka_error_t* + * @return \FFI\CData|null rd_kafka_error_t* - an error object on failure or NULL on success. See rd_kafka_producev() for specific error codes. * @since 1.5.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a267a7eabf933733e542902533141bd47 */ public static function rd_kafka_produceva(?\FFI\CData $rk, ?\FFI\CData $vus, ?int $cnt): ?\FFI\CData { @@ -3970,11 +4748,16 @@ public static function rd_kafka_produceva(?\FFI\CData $rk, ?\FFI\CData $vus, ?in } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @param \FFI\CData|null $dst char* - * @param int|null $dstsize size_t - * @return int|null int + *

    Extract log debug context from event.

    + *

    Event types:

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - the event to extract data from. + * @param \FFI\CData|null $dst char* - destination string for comma separated list. + * @param int|null $dstsize size_t - size of provided dst buffer. + * @return int|null int - 0 on success or -1 if unsupported event type. * @since 1.5.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5c43c8164c856aeb3f3170808264458b */ public static function rd_kafka_event_debug_contexts(?\FFI\CData $rkev, ?\FFI\CData $dst, ?int $dstsize): ?int { @@ -3996,9 +4779,13 @@ public static function rd_kafka_mock_broker_push_request_errors(?\FFI\CData $mcl } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @return \FFI\CData|null rd_kafka_topic_conf_t* + *

    Gets the default topic configuration as previously set with rd_kafka_conf_set_default_topic_conf() or that was implicitly created by configuring a topic-level property on the global conf object.

    + * + *
    Warning
    The returned topic configuration object is owned by the conf object. It may be modified but not destroyed and its lifetime is the same as the conf object or the next call to rd_kafka_conf_set_default_topic_conf().
    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - ) + * @return \FFI\CData|null rd_kafka_topic_conf_t* - the conf's default topic configuration (if any), or NULL. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a622ed908e64c21ba53449216400109ee */ public static function rd_kafka_conf_get_default_topic_conf(?\FFI\CData $conf): ?\FFI\CData { @@ -4006,8 +4793,11 @@ public static function rd_kafka_conf_get_default_topic_conf(?\FFI\CData $conf): } /** - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Cancels the current rd_kafka_queue_poll() on rkqu.

    + *

    An application may use this from another thread to force an immediate return to the calling code (caller of rd_kafka_queue_poll()). Must not be used from signal handlers since that may cause deadlocks.

    + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - ) * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af3f15671034a8a5c6d9a3b33cc1b6f3f */ public static function rd_kafka_queue_yield(?\FFI\CData $rkqu): void { @@ -4015,11 +4805,20 @@ public static function rd_kafka_queue_yield(?\FFI\CData $rkqu): void } /** + *

    Seek consumer for partitions in partitions to the per-partition offset in the .offset field of partitions.

    + *

    The offset may be either absolute (>= 0) or a logical offset.

    + *

    If timeout_ms is specified (not 0) the seek call will wait this long for the consumer to update its fetcher state for the given partition with the new offset. This guarantees that no previously fetched messages for the old offset (or fetch position) will be passed to the application.

    + *

    If the timeout is reached the internal state will be unknown to the caller and this function returns RD_KAFKA_RESP_ERR__TIMED_OUT.

    + *

    If timeout_ms is 0 it will initiate the seek but return immediately without any error reporting (e.g., async).

    + *

    This call will purge all pre-fetched messages for the given partition, which may be up to queued.max.message.kbytes in size. Repeated use of seek may thus lead to increased network usage as messages are re-fetched from the broker.

    + *

    Individual partition errors are reported in the per-partition .err field of partitions.

    + *
    Remarks
    Seek must only be performed for already assigned/consumed partitions, use rd_kafka_assign() (et.al) to set the initial starting offset for a new assignment.
    * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|null $partitions rd_kafka_topic_partition_list_t* * @param int|null $timeout_ms int - * @return \FFI\CData|null rd_kafka_error_t* + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7821331324eaab995d4a2baf42b78df5 */ public static function rd_kafka_seek_partitions(?\FFI\CData $rk, ?\FFI\CData $partitions, ?int $timeout_ms): ?\FFI\CData { @@ -4027,10 +4826,15 @@ public static function rd_kafka_seek_partitions(?\FFI\CData $rk, ?\FFI\CData $pa } /** + *

    Incrementally add partitions to the current assignment.

    + *

    If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, this method should be used in a rebalance callback to adjust the current assignment appropriately in the case where the rebalance type is RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the partition list passed to the callback (or a copy of it), even if the list is empty. partitions must not be NULL. This method may also be used outside the context of a rebalance callback.

    + * + *
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|null $partitions const rd_kafka_topic_partition_list_t* - * @return \FFI\CData|null rd_kafka_error_t* + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success, or an error object if the operation was unsuccessful. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3bd9f42cf76b2a8cf2f4a4343abe8556 */ public static function rd_kafka_incremental_assign(?\FFI\CData $rk, ?\FFI\CData $partitions): ?\FFI\CData { @@ -4038,10 +4842,15 @@ public static function rd_kafka_incremental_assign(?\FFI\CData $rk, ?\FFI\CData } /** + *

    Incrementally remove partitions from the current assignment.

    + *

    If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, this method should be used in a rebalance callback to adjust the current assignment appropriately in the case where the rebalance type is RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the partition list passed to the callback (or a copy of it), even if the list is empty. partitions must not be NULL. This method may also be used outside the context of a rebalance callback.

    + * + *
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|null $partitions const rd_kafka_topic_partition_list_t* - * @return \FFI\CData|null rd_kafka_error_t* + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success, or an error object if the operation was unsuccessful. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a27f7bd18e42ed44f33932c2f9b6a4192 */ public static function rd_kafka_incremental_unassign(?\FFI\CData $rk, ?\FFI\CData $partitions): ?\FFI\CData { @@ -4049,9 +4858,11 @@ public static function rd_kafka_incremental_unassign(?\FFI\CData $rk, ?\FFI\CDat } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return string|null const char* + *

    The rebalance protocol currently in use. This will be "NONE" if the consumer has not (yet) joined a group, else it will match the rebalance protocol ("EAGER", "COOPERATIVE") of the configured and selected assignor(s). All configured assignors must have the same protocol type, meaning online migration of a consumer group from using one protocol to another (in particular upgrading from EAGER to COOPERATIVE) without a restart is not currently supported.

    + * @param \FFI\CData|null $rk rd_kafka_t* - ) + * @return string|null const char* - NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a57d367712406848d59cdaae97ab29354 */ public static function rd_kafka_rebalance_protocol(?\FFI\CData $rk): ?string { @@ -4059,9 +4870,12 @@ public static function rd_kafka_rebalance_protocol(?\FFI\CData $rk): ?string } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return int|null int + *

    Check whether the consumer considers the current assignment to have been lost involuntarily. This method is only applicable for use with a high level subscribing consumer. Assignments are revoked immediately when determined to have been lost, so this method is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event or from within a rebalance_cb. Partitions that have been lost may already be owned by other members in the group and therefore committing offsets, for example, may fail.

    + *
    Remarks
    Calling rd_kafka_assign(), rd_kafka_incremental_assign() or rd_kafka_incremental_unassign() resets this flag.
    + * @param \FFI\CData|null $rk rd_kafka_t* - ) + * @return int|null int - Returns 1 if the current partition assignment is considered lost, 0 otherwise. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5383a36bacd08e9ef52bfe29accb34a1 */ public static function rd_kafka_assignment_lost(?\FFI\CData $rk): ?int { @@ -4069,12 +4883,16 @@ public static function rd_kafka_assignment_lost(?\FFI\CData $rk): ?int } /** - * @param string|null $group_id const char* - * @param int|null $generation_id int32_t - * @param string|null $member_id const char* - * @param string|null $group_instance_id const char* + *

    Create a new consumer group metadata object. This is typically only used for writing tests.

    + * + *
    Remarks
    The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
    + * @param string|null $group_id const char* - The group id. + * @param int|null $generation_id int32_t - The group generation id. + * @param string|null $member_id const char* - The group member id. + * @param string|null $group_instance_id const char* - The group instance id (may be NULL). * @return \FFI\CData|null rd_kafka_consumer_group_metadata_t* * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a679b7a0b807f69b72a4088bcee0e13c9 */ public static function rd_kafka_consumer_group_metadata_new_with_genid(?string $group_id, ?int $generation_id, ?string $member_id, ?string $group_instance_id): ?\FFI\CData { @@ -4082,9 +4900,11 @@ public static function rd_kafka_consumer_group_metadata_new_with_genid(?string $ } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_DeleteRecords_result_t* + *

    Event types: RD_KAFKA_EVENT_DELETERECORDS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_DeleteRecords_result_t* - the result of a DeleteRecords request, or NULL if event is of different type. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad98fee7bec920114c73c1af90be53416 */ public static function rd_kafka_event_DeleteRecords_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -4092,9 +4912,13 @@ public static function rd_kafka_event_DeleteRecords_result(?\FFI\CData $rkev): ? } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_DeleteGroups_result_t* + *

    Get DeleteGroups result.

    + * + *

    Event types: RD_KAFKA_EVENT_DELETEGROUPS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_DeleteGroups_result_t* - the result of a DeleteGroups request, or NULL if event is of different type. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad417e30a46183425d3549acb0a2c7e9f */ public static function rd_kafka_event_DeleteGroups_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -4102,9 +4926,13 @@ public static function rd_kafka_event_DeleteGroups_result(?\FFI\CData $rkev): ?\ } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_DeleteConsumerGroupOffsets_result_t* + *

    Get DeleteConsumerGroupOffsets result.

    + * + *

    Event types: RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_DeleteConsumerGroupOffsets_result_t* - the result of a DeleteConsumerGroupOffsets request, or NULL if event is of different type. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a36ec38fe2135740ac6b41a4141fbc566 */ public static function rd_kafka_event_DeleteConsumerGroupOffsets_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -4112,9 +4940,13 @@ public static function rd_kafka_event_DeleteConsumerGroupOffsets_result(?\FFI\CD } /** - * @param \FFI\CData|null $groupres const rd_kafka_group_result_t* - * @return \FFI\CData|null const rd_kafka_error_t* + *

    Group result provides per-group operation result information.

    + * + *
    Remarks
    lifetime of the returned error is the same as the groupres.
    + * @param \FFI\CData|null $groupres const rd_kafka_group_result_t* - ) + * @return \FFI\CData|null const rd_kafka_error_t* - the error for the given group result, or NULL on success. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a97cda56301d566204aad9320e2b748ba */ public static function rd_kafka_group_result_error(?\FFI\CData $groupres): ?\FFI\CData { @@ -4122,9 +4954,11 @@ public static function rd_kafka_group_result_error(?\FFI\CData $groupres): ?\FFI } /** - * @param \FFI\CData|null $groupres const rd_kafka_group_result_t* - * @return string|null const char* + *
    Remarks
    lifetime of the returned string is the same as the groupres.
    + * @param \FFI\CData|null $groupres const rd_kafka_group_result_t* - ) + * @return string|null const char* - the name of the group for the given group result. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7f8f41cfe01a2b977e4a2d54644e765d */ public static function rd_kafka_group_result_name(?\FFI\CData $groupres): ?string { @@ -4132,9 +4966,11 @@ public static function rd_kafka_group_result_name(?\FFI\CData $groupres): ?strin } /** - * @param \FFI\CData|null $groupres const rd_kafka_group_result_t* - * @return \FFI\CData|null const rd_kafka_topic_partition_list_t* + *
    Remarks
    lifetime of the returned list is the same as the groupres.
    + * @param \FFI\CData|null $groupres const rd_kafka_group_result_t* - ) + * @return \FFI\CData|null const rd_kafka_topic_partition_list_t* - the partitions/offsets for the given group result, if applicable to the request type, else NULL. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5c039926ae9dca773ebec2f9147f2fa1 */ public static function rd_kafka_group_result_partitions(?\FFI\CData $groupres): ?\FFI\CData { @@ -4142,9 +4978,12 @@ public static function rd_kafka_group_result_partitions(?\FFI\CData $groupres): } /** - * @param \FFI\CData|null $before_offsets const rd_kafka_topic_partition_list_t* - * @return \FFI\CData|null rd_kafka_DeleteRecords_t* + *

    Create a new DeleteRecords object. This object is later passed to rd_kafka_DeleteRecords().

    + *

    before_offsets must contain topic, partition, and offset is the offset before which the messages will be deleted (exclusive). Set offset to RD_KAFKA_OFFSET_END (high-watermark) in order to delete all data in the partition.

    + * @param \FFI\CData|null $before_offsets const rd_kafka_topic_partition_list_t* - ) - For each partition delete all messages up to but not including the specified offset. + * @return \FFI\CData|null rd_kafka_DeleteRecords_t* - a new allocated DeleteRecords object. Use rd_kafka_DeleteRecords_destroy() to free object when done. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#adbbe46dc5bfe86a3ead89c130b13d9f8 */ public static function rd_kafka_DeleteRecords_new(?\FFI\CData $before_offsets): ?\FFI\CData { @@ -4171,12 +5010,20 @@ public static function rd_kafka_DeleteRecords_destroy_array(?\FFI\CData $del_rec } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $del_records rd_kafka_DeleteRecords_t** - * @param int|null $del_record_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Delete records (messages) in topic partitions older than the offsets provided.

    + * + *

    Supported admin options:

    + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETERECORDS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $del_records rd_kafka_DeleteRecords_t** - The offsets to delete (up to). Currently only one DeleteRecords_t (but containing multiple offsets) is supported. + * @param int|null $del_record_cnt size_t - The number of elements in del_records, must be 1. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aad06345d1b4cd13e56d7253b889b2349 */ public static function rd_kafka_DeleteRecords(?\FFI\CData $rk, ?\FFI\CData $del_records, ?int $del_record_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -4184,9 +5031,12 @@ public static function rd_kafka_DeleteRecords(?\FFI\CData $rk, ?\FFI\CData $del_ } /** - * @param \FFI\CData|null $result const rd_kafka_DeleteRecords_result_t* + *

    Get a list of topic and partition results from a DeleteRecords result. The returned objects will contain topic, partition, offset and err. offset will be set to the post-deletion low-watermark (smallest available offset of all live replicas). err will be set per-partition if deletion failed.

    + *

    The returned object's life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_DeleteRecords_result_t* - ) * @return \FFI\CData|null const rd_kafka_topic_partition_list_t* * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac894a0e005db1fa671c1802ba3563b69 */ public static function rd_kafka_DeleteRecords_result_offsets(?\FFI\CData $result): ?\FFI\CData { @@ -4194,9 +5044,11 @@ public static function rd_kafka_DeleteRecords_result_offsets(?\FFI\CData $result } /** - * @param string|null $group const char* - * @return \FFI\CData|null rd_kafka_DeleteGroup_t* + *

    Create a new DeleteGroup object. This object is later passed to rd_kafka_DeleteGroups().

    + * @param string|null $group const char* - ) - Name of group to delete. + * @return \FFI\CData|null rd_kafka_DeleteGroup_t* - a new allocated DeleteGroup object. Use rd_kafka_DeleteGroup_destroy() to free object when done. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6c3bb6e2b05f40bed55ffdfe2b4daa59 */ public static function rd_kafka_DeleteGroup_new(?string $group): ?\FFI\CData { @@ -4223,12 +5075,16 @@ public static function rd_kafka_DeleteGroup_destroy_array(?\FFI\CData $del_group } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $del_groups rd_kafka_DeleteGroup_t** - * @param int|null $del_group_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Delete groups from cluster as specified by the del_groups array of size del_group_cnt elements.

    + * + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETEGROUPS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $del_groups rd_kafka_DeleteGroup_t** - Array of groups to delete. + * @param int|null $del_group_cnt size_t - Number of elements in del_groups array. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af929e4f20999b43d7bd3724502c1b27a */ public static function rd_kafka_DeleteGroups(?\FFI\CData $rk, ?\FFI\CData $del_groups, ?int $del_group_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -4236,10 +5092,13 @@ public static function rd_kafka_DeleteGroups(?\FFI\CData $rk, ?\FFI\CData $del_g } /** - * @param \FFI\CData|null $result const rd_kafka_DeleteGroups_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of group results from a DeleteGroups result.

    + *

    The returned groups life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_DeleteGroups_result_t* - Result to get group results from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_group_result_t** * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a20097b0ced17760c9aa624552dbf47e7 */ public static function rd_kafka_DeleteGroups_result_groups(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -4247,10 +5106,12 @@ public static function rd_kafka_DeleteGroups_result_groups(?\FFI\CData $result, } /** - * @param string|null $group const char* - * @param \FFI\CData|null $partitions const rd_kafka_topic_partition_list_t* - * @return \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t* + *

    Create a new DeleteConsumerGroupOffsets object. This object is later passed to rd_kafka_DeleteConsumerGroupOffsets().

    + * @param string|null $group const char* - Consumer group id. + * @param \FFI\CData|null $partitions const rd_kafka_topic_partition_list_t* - Partitions to delete committed offsets for. Only the topic and partition fields are used. + * @return \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t* - a new allocated DeleteConsumerGroupOffsets object. Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free object when done. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8bb6df9dd6ce4e86dea385f90151d81f */ public static function rd_kafka_DeleteConsumerGroupOffsets_new(?string $group, ?\FFI\CData $partitions): ?\FFI\CData { @@ -4277,12 +5138,18 @@ public static function rd_kafka_DeleteConsumerGroupOffsets_destroy_array(?\FFI\C } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $del_grpoffsets rd_kafka_DeleteConsumerGroupOffsets_t** - * @param int|null $del_grpoffsets_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Delete committed offsets for a set of partitions in a conusmer group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic.

    + * + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
    + *
    + * The current implementation only supports one group per invocation.
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $del_grpoffsets rd_kafka_DeleteConsumerGroupOffsets_t** - Array of group committed offsets to delete. MUST only be one single element. + * @param int|null $del_grpoffsets_cnt size_t - Number of elements in del_grpoffsets array. MUST always be 1. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afa8e8de419157cf97a083b9355d3385f */ public static function rd_kafka_DeleteConsumerGroupOffsets(?\FFI\CData $rk, ?\FFI\CData $del_grpoffsets, ?int $del_grpoffsets_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -4290,10 +5157,13 @@ public static function rd_kafka_DeleteConsumerGroupOffsets(?\FFI\CData $rk, ?\FF } /** - * @param \FFI\CData|null $result const rd_kafka_DeleteConsumerGroupOffsets_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of results from a DeleteConsumerGroupOffsets result.

    + *

    The returned groups life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_DeleteConsumerGroupOffsets_result_t* - Result to get group results from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_group_result_t** * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a014654eaec7601933e6ec5d5613ef4b0 */ public static function rd_kafka_DeleteConsumerGroupOffsets_result_groups(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -4323,19 +5193,23 @@ public static function rd_kafka_mock_push_request_errors_array(?\FFI\CData $mclu } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param int|null $sockfd int - * @param string|null $brokername const char* - * @param int|null $brokerid int32_t - * @param int|null $ApiKey int16_t - * @param int|null $ApiVersion int16_t - * @param int|null $CorrId int32_t - * @param int|null $size size_t - * @param int|null $rtt int64_t - * @param int $err rd_kafka_resp_err_t - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + *

    on_response_received() is called when a protocol response has been fully received from a broker TCP connection socket but before the response payload is parsed.

    + * + *
    Warning
    The on_response_received() interceptor is called from internal librdkafka broker threads. An on_response_received() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
    + * @param \FFI\CData|null $rk rd_kafka_t* - The client instance. + * @param int|null $sockfd int - Socket file descriptor (always -1). + * @param string|null $brokername const char* - Broker response was received from, possibly empty string on error. + * @param int|null $brokerid int32_t - Broker response was received from. + * @param int|null $ApiKey int16_t - Kafka protocol request type or -1 on error. + * @param int|null $ApiVersion int16_t - Kafka protocol request type version or -1 on error. + * @param int|null $CorrId int32_t - Kafka protocol request correlation id, possibly -1 on error. + * @param int|null $size size_t - Size of response, possibly 0 on error. + * @param int|null $rtt int64_t - Request round-trip-time in microseconds, possibly -1 on error. + * @param int $err rd_kafka_resp_err_t - Receive error. + * @param \FFI\CData|object|string|null $ic_opaque void* - The interceptor's opaque pointer specified in ..add..(). + * @return int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored. * @since 1.6.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8e6b3f288085b2495cec22bc912265b9 */ public static function rd_kafka_interceptor_f_on_response_received_t(?\FFI\CData $rk, ?int $sockfd, ?string $brokername, ?int $brokerid, ?int $ApiKey, ?int $ApiVersion, ?int $CorrId, ?int $size, ?int $rtt, int $err, $ic_opaque): int { @@ -4343,12 +5217,14 @@ public static function rd_kafka_interceptor_f_on_response_received_t(?\FFI\CData } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param string|null $ic_name const char* + *

    Append an on_response_received() interceptor.

    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param string|null $ic_name const char* - Interceptor name, used in logging. * @param \FFI\CData|\Closure $on_response_received rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t*)(rd_kafka_t*, int, const char*, int32_t, int16_t, int16_t, int32_t, size_t, int64_t, rd_kafka_resp_err_t, void*) - * @param \FFI\CData|object|string|null $ic_opaque void* - * @return int rd_kafka_resp_err_t + * @param \FFI\CData|object|string|null $ic_opaque void* - Opaque value that will be passed to the function. + * @return int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing intercepted with the same ic_name and function has already been added to conf. * @since 1.6.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a809061ceda162f8d8ad9663cabf66b3d */ public static function rd_kafka_interceptor_add_on_response_received(?\FFI\CData $rk, ?string $ic_name, $on_response_received, $ic_opaque): int { @@ -4356,9 +5232,15 @@ public static function rd_kafka_interceptor_add_on_response_received(?\FFI\CData } /** - * @param \FFI\CData|null $conf rd_kafka_conf_t* - * @param \FFI\CData|object|string|null $callback_data void* + *

    Set callback_data for OpenSSL engine.

    + * + *
    Remarks
    The ssl.engine.location configuration must be set for this to have affect.
    + *
    + * The memory pointed to by value must remain valid for the lifetime of the configuration object and any Kafka clients that use it.
    + * @param \FFI\CData|null $conf rd_kafka_conf_t* - Configuration object. + * @param \FFI\CData|object|string|null $callback_data void* - passed to engine callbacks, e.g. ENGINE_load_ssl_client_cert. * @since 1.7.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a07871a230226c6acbdf3cd062545812b */ public static function rd_kafka_conf_set_engine_callback_data(?\FFI\CData $conf, $callback_data): void { @@ -4366,11 +5248,16 @@ public static function rd_kafka_conf_set_engine_callback_data(?\FFI\CData $conf, } /** + *

    Allocate and zero memory using the same allocator librdkafka uses.

    + *

    This is typically an abstraction for the calloc(3) call and makes sure the application can use the same memory allocator as librdkafka for allocating pointers that are used by librdkafka.

    + *

    rk can be set to return memory allocated by a specific rk instance otherwise pass NULL for rk.

    + *
    Remarks
    Memory allocated by rd_kafka_mem_calloc() must be freed using rd_kafka_mem_free()
    * @param \FFI\CData|null $rk rd_kafka_t* * @param int|null $num size_t * @param int|null $size size_t * @return \FFI\CData|object|string|null void* * @since 1.7.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a74216ba5f5d1c1d8ec7936c225a73286 */ public static function rd_kafka_mem_calloc(?\FFI\CData $rk, ?int $num, ?int $size) { @@ -4378,10 +5265,15 @@ public static function rd_kafka_mem_calloc(?\FFI\CData $rk, ?int $num, ?int $siz } /** + *

    Allocate memory using the same allocator librdkafka uses.

    + *

    This is typically an abstraction for the malloc(3) call and makes sure the application can use the same memory allocator as librdkafka for allocating pointers that are used by librdkafka.

    + *

    rk can be set to return memory allocated by a specific rk instance otherwise pass NULL for rk.

    + *
    Remarks
    Memory allocated by rd_kafka_mem_malloc() must be freed using rd_kafka_mem_free()
    * @param \FFI\CData|null $rk rd_kafka_t* * @param int|null $size size_t * @return \FFI\CData|object|string|null void* * @since 1.7.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a60f3ac40bed72542f68c93fa1bc16d3e */ public static function rd_kafka_mem_malloc(?\FFI\CData $rk, ?int $size) { @@ -4403,9 +5295,18 @@ public static function rd_kafka_mock_broker_push_request_error_rtts(?\FFI\CData } /** + *

    Enable/disable creation of a queue specific to SASL events and callbacks.

    + *

    For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this configuration API allows an application to get a dedicated queue for the SASL events/callbacks. After enabling the queue with this API the application can retrieve the queue by calling rd_kafka_queue_get_sasl() on the client instance. This queue may then be served directly by the application (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as the background queue.

    + *

    A convenience function is available to automatically forward the SASL queue to librdkafka's background thread, see rd_kafka_sasl_background_callbacks_enable().

    + *

    By default (enable = 0) the main queue (as served by rd_kafka_poll(), et.al.) is used for SASL callbacks.

    + *
    Remarks
    The SASL queue is currently only used by the SASL OAUTHBEARER mechanism's token_refresh_cb().
    + *
    See also
    rd_kafka_queue_get_sasl()
    + *
    + * rd_kafka_sasl_background_callbacks_enable()
    * @param \FFI\CData|null $conf rd_kafka_conf_t* * @param int|null $enable int * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad01f5e5a40eaebf133db5a2858198fbd */ public static function rd_kafka_conf_enable_sasl_queue(?\FFI\CData $conf, ?int $enable): void { @@ -4413,9 +5314,12 @@ public static function rd_kafka_conf_enable_sasl_queue(?\FFI\CData $conf, ?int $ } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return \FFI\CData|null rd_kafka_queue_t* + *

    Use rd_kafka_queue_destroy() to loose the reference.

    + *
    See also
    rd_kafka_sasl_background_callbacks_enable()
    + * @param \FFI\CData|null $rk rd_kafka_t* - ) + * @return \FFI\CData|null rd_kafka_queue_t* - a reference to the SASL callback queue, if a SASL mechanism with callbacks is configured (currently only OAUTHBEARER), else returns NULL. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a56d2a479cb1e2fb540db1f25f45b7ece */ public static function rd_kafka_queue_get_sasl(?\FFI\CData $rk): ?\FFI\CData { @@ -4423,9 +5327,16 @@ public static function rd_kafka_queue_get_sasl(?\FFI\CData $rk): ?\FFI\CData } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return \FFI\CData|null rd_kafka_error_t* + *

    Enable SASL OAUTHBEARER refresh callbacks on the librdkafka background thread.

    + *

    This serves as an alternative for applications that do not call rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means of automatically trigger the refresh callbacks, which are needed to initiate connections to the brokers in the case a custom OAUTHBEARER refresh callback is configured.

    + * + *
    See also
    rd_kafka_queue_get_sasl()
    + *
    + * rd_kafka_conf_set_oauthbearer_token_refresh_cb()
    + * @param \FFI\CData|null $rk rd_kafka_t* - ) + * @return \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on error. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a383a661dc45191bccb3a2b50adbbfcdb */ public static function rd_kafka_sasl_background_callbacks_enable(?\FFI\CData $rk): ?\FFI\CData { @@ -4433,10 +5344,17 @@ public static function rd_kafka_sasl_background_callbacks_enable(?\FFI\CData $rk } /** + *

    Asynchronously close the consumer.

    + *

    Performs the same actions as rd_kafka_consumer_close() but in a background thread.

    + *

    Rebalance events/callbacks (etc) will be forwarded to the application-provided rkqu. The application must poll/serve this queue until rd_kafka_consumer_closed() returns true.

    + *
    Remarks
    Depending on consumer group join state there may or may not be rebalance events emitted on rkqu.
    + * + *
    See also
    rd_kafka_consumer_closed()
    * @param \FFI\CData|null $rk rd_kafka_t* * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - * @return \FFI\CData|null rd_kafka_error_t* + * @return \FFI\CData|null rd_kafka_error_t* - an error object if the consumer close failed, else NULL. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9dd5c18bdfed81c8847b259f0a8d498d */ public static function rd_kafka_consumer_close_queue(?\FFI\CData $rk, ?\FFI\CData $rkqu): ?\FFI\CData { @@ -4444,9 +5362,12 @@ public static function rd_kafka_consumer_close_queue(?\FFI\CData $rk, ?\FFI\CDat } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @return int|null int + *

    Should be used in conjunction with rd_kafka_consumer_close_queue() to know when the consumer has been closed.

    + *
    See also
    rd_kafka_consumer_close_queue()
    + * @param \FFI\CData|null $rk rd_kafka_t* - ) + * @return int|null int - 1 if the consumer is closed, else 0. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a88197205c61ed98b99f6442551a5e94f */ public static function rd_kafka_consumer_closed(?\FFI\CData $rk): ?int { @@ -4454,9 +5375,11 @@ public static function rd_kafka_consumer_closed(?\FFI\CData $rk): ?int } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_CreateAcls_result_t* + *

    Event types: RD_KAFKA_EVENT_CREATEACLS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_CreateAcls_result_t* - the result of a CreateAcls request, or NULL if event is of different type. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa605a0d0500ccb783ed7da23670075d9 */ public static function rd_kafka_event_CreateAcls_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -4464,9 +5387,11 @@ public static function rd_kafka_event_CreateAcls_result(?\FFI\CData $rkev): ?\FF } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_DescribeAcls_result_t* + *

    Event types: RD_KAFKA_EVENT_DESCRIBEACLS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_DescribeAcls_result_t* - the result of a DescribeAcls request, or NULL if event is of different type. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#adba2388c1eac2bb6d3e56fdaf4da0839 */ public static function rd_kafka_event_DescribeAcls_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -4474,9 +5399,11 @@ public static function rd_kafka_event_DescribeAcls_result(?\FFI\CData $rkev): ?\ } /** - * @param \FFI\CData|null $rkev rd_kafka_event_t* - * @return \FFI\CData|null const rd_kafka_DeleteAcls_result_t* + *

    Event types: RD_KAFKA_EVENT_DELETEACLS_RESULT

    + * @param \FFI\CData|null $rkev rd_kafka_event_t* - ) + * @return \FFI\CData|null const rd_kafka_DeleteAcls_result_t* - the result of a DeleteAcls request, or NULL if event is of different type. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9b456786cd867418fd7f8e3cb5f25478 */ public static function rd_kafka_event_DeleteAcls_result(?\FFI\CData $rkev): ?\FFI\CData { @@ -4484,9 +5411,11 @@ public static function rd_kafka_event_DeleteAcls_result(?\FFI\CData $rkev): ?\FF } /** - * @param int $resource_pattern_type rd_kafka_ResourcePatternType_t - * @return string|null const char* + * + * @param int $resource_pattern_type rd_kafka_ResourcePatternType_t - ) + * @return string|null const char* - a string representation of the resource_pattern_type * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a488574082a549170e09adb67faca2f46 */ public static function rd_kafka_ResourcePatternType_name(int $resource_pattern_type): ?string { @@ -4494,9 +5423,11 @@ public static function rd_kafka_ResourcePatternType_name(int $resource_pattern_t } /** - * @param \FFI\CData|null $aclres const rd_kafka_acl_result_t* - * @return \FFI\CData|null const rd_kafka_error_t* + * + * @param \FFI\CData|null $aclres const rd_kafka_acl_result_t* - ) + * @return \FFI\CData|null const rd_kafka_error_t* - the error object for the given acl result, or NULL on success. 
* @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aebde4f3d06cf6ae74b4be39bca9107bb */ public static function rd_kafka_acl_result_error(?\FFI\CData $aclres): ?\FFI\CData { @@ -4504,9 +5435,11 @@ public static function rd_kafka_acl_result_error(?\FFI\CData $aclres): ?\FFI\CDa } /** - * @param int $acl_operation rd_kafka_AclOperation_t - * @return string|null const char* + * + * @param int $acl_operation rd_kafka_AclOperation_t - ) + * @return string|null const char* - a string representation of the acl_operation * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afeab00c55bf43b5152ecef5167baa227 */ public static function rd_kafka_AclOperation_name(int $acl_operation): ?string { @@ -4514,9 +5447,11 @@ public static function rd_kafka_AclOperation_name(int $acl_operation): ?string } /** - * @param int $acl_permission_type rd_kafka_AclPermissionType_t - * @return string|null const char* + * + * @param int $acl_permission_type rd_kafka_AclPermissionType_t - ) + * @return string|null const char* - a string representation of the acl_permission_type * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9df29768d60a17013bb4e2860378f248 */ public static function rd_kafka_AclPermissionType_name(int $acl_permission_type): ?string { @@ -4524,17 +5459,19 @@ public static function rd_kafka_AclPermissionType_name(int $acl_permission_type) } /** - * @param int $restype rd_kafka_ResourceType_t - * @param string|null $name const char* - * @param int $resource_pattern_type rd_kafka_ResourcePatternType_t - * @param string|null $principal const char* - * @param string|null $host const char* - * @param int $operation rd_kafka_AclOperation_t - * @param int $permission_type rd_kafka_AclPermissionType_t - * @param \FFI\CData|null $errstr char* - * @param int|null $errstr_size size_t 
- * @return \FFI\CData|null rd_kafka_AclBinding_t* + *

    Create a new AclBinding object. This object is later passed to rd_kafka_CreateAcls().

    + * @param int $restype rd_kafka_ResourceType_t - The ResourceType. + * @param string|null $name const char* - The resource name. + * @param int $resource_pattern_type rd_kafka_ResourcePatternType_t - The pattern type. + * @param string|null $principal const char* - A principal, following the kafka specification. + * @param string|null $host const char* - An hostname or ip. + * @param int $operation rd_kafka_AclOperation_t - A Kafka operation. + * @param int $permission_type rd_kafka_AclPermissionType_t - A Kafka permission type. + * @param \FFI\CData|null $errstr char* - An error string for returning errors or NULL to not use it. + * @param int|null $errstr_size size_t - The errstr size or 0 to not use it. + * @return \FFI\CData|null rd_kafka_AclBinding_t* - a new allocated AclBinding object, or NULL if the input parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when done. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3475335e39b68c6033c87e05f5a4c5fa */ public static function rd_kafka_AclBinding_new(int $restype, ?string $name, int $resource_pattern_type, ?string $principal, ?string $host, int $operation, int $permission_type, ?\FFI\CData $errstr, ?int $errstr_size): ?\FFI\CData { @@ -4542,17 +5479,19 @@ public static function rd_kafka_AclBinding_new(int $restype, ?string $name, int } /** - * @param int $restype rd_kafka_ResourceType_t - * @param string|null $name const char* - * @param int $resource_pattern_type rd_kafka_ResourcePatternType_t - * @param string|null $principal const char* - * @param string|null $host const char* - * @param int $operation rd_kafka_AclOperation_t - * @param int $permission_type rd_kafka_AclPermissionType_t - * @param \FFI\CData|null $errstr char* - * @param int|null $errstr_size size_t - * @return \FFI\CData|null rd_kafka_AclBindingFilter_t* + *

    Create a new AclBindingFilter object. This object is later passed to rd_kafka_DescribeAcls() or rd_kafka_DeletesAcls() in order to filter the acls to retrieve or to delete. Use the same rd_kafka_AclBinding functions to query or destroy it.

    + * @param int $restype rd_kafka_ResourceType_t - The ResourceType or RD_KAFKA_RESOURCE_ANY if not filtering by this field. + * @param string|null $name const char* - The resource name or NULL if not filtering by this field. + * @param int $resource_pattern_type rd_kafka_ResourcePatternType_t - The pattern type or RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field. + * @param string|null $principal const char* - A principal or NULL if not filtering by this field. + * @param string|null $host const char* - An hostname or ip or NULL if not filtering by this field. + * @param int $operation rd_kafka_AclOperation_t - A Kafka operation or RD_KAFKA_ACL_OPERATION_ANY if not filtering by this field. + * @param int $permission_type rd_kafka_AclPermissionType_t - A Kafka permission type or RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field. + * @param \FFI\CData|null $errstr char* - An error string for returning errors or NULL to not use it. + * @param int|null $errstr_size size_t - The errstr size or 0 to not use it. + * @return \FFI\CData|null rd_kafka_AclBindingFilter_t* - a new allocated AclBindingFilter object, or NULL if the input parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when done. 
* @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a20003b0ae6aeb68f02f3929c5c797372 */ public static function rd_kafka_AclBindingFilter_new(int $restype, ?string $name, int $resource_pattern_type, ?string $principal, ?string $host, int $operation, int $permission_type, ?\FFI\CData $errstr, ?int $errstr_size): ?\FFI\CData { @@ -4560,9 +5499,11 @@ public static function rd_kafka_AclBindingFilter_new(int $restype, ?string $name } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return int rd_kafka_ResourceType_t + * + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return int rd_kafka_ResourceType_t - the resource type for the given acl binding. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af576f429f52ace1379cf9d2a4c453d8c */ public static function rd_kafka_AclBinding_restype(?\FFI\CData $acl): int { @@ -4570,9 +5511,11 @@ public static function rd_kafka_AclBinding_restype(?\FFI\CData $acl): int } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return string|null const char* + *
    Remarks
    lifetime of the returned string is the same as the acl.
    + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return string|null const char* - the resource name for the given acl binding. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acb03634b4456326f3592b79584c551ea */ public static function rd_kafka_AclBinding_name(?\FFI\CData $acl): ?string { @@ -4580,9 +5523,11 @@ public static function rd_kafka_AclBinding_name(?\FFI\CData $acl): ?string } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return string|null const char* + *
    Remarks
    lifetime of the returned string is the same as the acl.
    + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return string|null const char* - the principal for the given acl binding. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad99b0868262484fc46b7c36b7db2173b */ public static function rd_kafka_AclBinding_principal(?\FFI\CData $acl): ?string { @@ -4590,9 +5535,11 @@ public static function rd_kafka_AclBinding_principal(?\FFI\CData $acl): ?string } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return string|null const char* + *
    Remarks
    lifetime of the returned string is the same as the acl.
    + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return string|null const char* - the host for the given acl binding. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8c242229f6b0b8faeb078fc814261fa7 */ public static function rd_kafka_AclBinding_host(?\FFI\CData $acl): ?string { @@ -4600,9 +5547,11 @@ public static function rd_kafka_AclBinding_host(?\FFI\CData $acl): ?string } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return int rd_kafka_AclOperation_t + * + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return int rd_kafka_AclOperation_t - the acl operation for the given acl binding. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab4c620d7a3155c7f8cbf5556b99c2b7d */ public static function rd_kafka_AclBinding_operation(?\FFI\CData $acl): int { @@ -4610,9 +5559,11 @@ public static function rd_kafka_AclBinding_operation(?\FFI\CData $acl): int } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return int rd_kafka_AclPermissionType_t + * + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return int rd_kafka_AclPermissionType_t - the permission type for the given acl binding. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aadb093357b1ed866ae99c710bd5e44f3 */ public static function rd_kafka_AclBinding_permission_type(?\FFI\CData $acl): int { @@ -4620,9 +5571,11 @@ public static function rd_kafka_AclBinding_permission_type(?\FFI\CData $acl): in } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return int rd_kafka_ResourcePatternType_t + * + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return int rd_kafka_ResourcePatternType_t - the resource pattern type for the given acl binding. 
* @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7c24d7c8312ea58cfdd18f8bc06fc5c8 */ public static function rd_kafka_AclBinding_resource_pattern_type(?\FFI\CData $acl): int { @@ -4630,9 +5583,11 @@ public static function rd_kafka_AclBinding_resource_pattern_type(?\FFI\CData $ac } /** - * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - * @return \FFI\CData|null const rd_kafka_error_t* + * + * @param \FFI\CData|null $acl const rd_kafka_AclBinding_t* - ) + * @return \FFI\CData|null const rd_kafka_error_t* - the error object for the given acl binding, or NULL on success. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a047abe192b6445a37cbab1f1be355f3d */ public static function rd_kafka_AclBinding_error(?\FFI\CData $acl): ?\FFI\CData { @@ -4659,10 +5614,13 @@ public static function rd_kafka_AclBinding_destroy_array(?\FFI\CData $acl_bindin } /** - * @param \FFI\CData|null $result const rd_kafka_CreateAcls_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of acl results from a CreateAcls result.

    + *

    The returned acl result life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_CreateAcls_result_t* - CreateAcls result to get acl results from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_acl_result_t** * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abfcceea2bb29d6fd800f11224f0f4238 */ public static function rd_kafka_CreateAcls_result_acls(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -4670,12 +5628,19 @@ public static function rd_kafka_CreateAcls_result_acls(?\FFI\CData $result, ?\FF } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $new_acls rd_kafka_AclBinding_t** - * @param int|null $new_acls_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Create acls as specified by the new_acls array of size new_topic_cnt elements.

    + * + *

    Supported admin options:

    + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATEACLS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $new_acls rd_kafka_AclBinding_t** - Array of new acls to create. + * @param int|null $new_acls_cnt size_t - Number of elements in new_acls array. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a028d1a0ff870bcea561d3cf474aa7265 */ public static function rd_kafka_CreateAcls(?\FFI\CData $rk, ?\FFI\CData $new_acls, ?int $new_acls_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -4683,10 +5648,15 @@ public static function rd_kafka_CreateAcls(?\FFI\CData $rk, ?\FFI\CData $new_acl } /** - * @param \FFI\CData|null $result const rd_kafka_DescribeAcls_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of resource results from a DescribeAcls result.

    + *

    + * - describe access control lists.

    + *

    The returned resources life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_DescribeAcls_result_t* - DescribeAcls result to get acls from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_AclBinding_t** * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa5d797f84a5dcf3c069636c1dc408934 */ public static function rd_kafka_DescribeAcls_result_acls(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -4694,11 +5664,18 @@ public static function rd_kafka_DescribeAcls_result_acls(?\FFI\CData $result, ?\ } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $acl_filter rd_kafka_AclBindingFilter_t* - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Describe acls matching the filter provided in acl_filter.

    + * + *

    Supported admin options:

    + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $acl_filter rd_kafka_AclBindingFilter_t* - Filter for the returned acls. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7dcf8b0d43014b7412d9a5059006194b */ public static function rd_kafka_DescribeAcls(?\FFI\CData $rk, ?\FFI\CData $acl_filter, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { @@ -4706,10 +5683,13 @@ public static function rd_kafka_DescribeAcls(?\FFI\CData $rk, ?\FFI\CData $acl_f } /** - * @param \FFI\CData|null $result const rd_kafka_DeleteAcls_result_t* - * @param \FFI\CData|null $cntp size_t* + *

    Get an array of DeleteAcls result responses from a DeleteAcls result.

    + *

    The returned responses life-time is the same as the result object.

    + * @param \FFI\CData|null $result const rd_kafka_DeleteAcls_result_t* - DeleteAcls result to get responses from. + * @param \FFI\CData|null $cntp size_t* - is updated to the number of elements in the array. * @return \FFI\CData|null const rd_kafka_DeleteAcls_result_response_t** * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af22ab28bfc36ac13ee043e95b0741dc0 */ public static function rd_kafka_DeleteAcls_result_responses(?\FFI\CData $result, ?\FFI\CData $cntp): ?\FFI\CData { @@ -4717,9 +5697,11 @@ public static function rd_kafka_DeleteAcls_result_responses(?\FFI\CData $result, } /** - * @param \FFI\CData|null $result_response const rd_kafka_DeleteAcls_result_response_t* - * @return \FFI\CData|null const rd_kafka_error_t* + * + * @param \FFI\CData|null $result_response const rd_kafka_DeleteAcls_result_response_t* - ) + * @return \FFI\CData|null const rd_kafka_error_t* - the error object for the given DeleteAcls result response, or NULL on success. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a519cf227ea6f1cadd19a767d430209c1 */ public static function rd_kafka_DeleteAcls_result_response_error(?\FFI\CData $result_response): ?\FFI\CData { @@ -4727,10 +5709,12 @@ public static function rd_kafka_DeleteAcls_result_response_error(?\FFI\CData $re } /** + *
    Remarks
    lifetime of the returned acl bindings is the same as the result_response.
    * @param \FFI\CData|null $result_response const rd_kafka_DeleteAcls_result_response_t* * @param \FFI\CData|null $matching_acls_cntp size_t* - * @return \FFI\CData|null const rd_kafka_AclBinding_t** + * @return \FFI\CData|null const rd_kafka_AclBinding_t** - the matching acls array for the given DeleteAcls result response. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a157a9b9ab67703b3e531a67f7a8da7de */ public static function rd_kafka_DeleteAcls_result_response_matching_acls(?\FFI\CData $result_response, ?\FFI\CData $matching_acls_cntp): ?\FFI\CData { @@ -4738,12 +5722,19 @@ public static function rd_kafka_DeleteAcls_result_response_matching_acls(?\FFI\C } /** - * @param \FFI\CData|null $rk rd_kafka_t* - * @param \FFI\CData|null $del_acls rd_kafka_AclBindingFilter_t** - * @param int|null $del_acls_cnt size_t - * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - * @param \FFI\CData|null $rkqu rd_kafka_queue_t* + *

    Delete acls matching the filteres provided in del_acls array of size del_acls_cnt.

    + * + *

    Supported admin options:

    + *
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETEACLS_RESULT
    + * @param \FFI\CData|null $rk rd_kafka_t* - Client instance. + * @param \FFI\CData|null $del_acls rd_kafka_AclBindingFilter_t** - Filters for the acls to delete. + * @param int|null $del_acls_cnt size_t - Number of elements in del_acls array. + * @param \FFI\CData|null $options const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults. + * @param \FFI\CData|null $rkqu rd_kafka_queue_t* - Queue to emit result on. * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6118e40b9abfff686a61dc5e093468ef */ public static function rd_kafka_DeleteAcls(?\FFI\CData $rk, ?\FFI\CData $del_acls, ?int $del_acls_cnt, ?\FFI\CData $options, ?\FFI\CData $rkqu): void { diff --git a/src/RdKafka/FFI/Versions/1.0.0.php b/src/RdKafka/FFI/Versions/1.0.0.php index 611365d4..5a4e2ce5 100644 --- a/src/RdKafka/FFI/Versions/1.0.0.php +++ b/src/RdKafka/FFI/Versions/1.0.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 16777471; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 74; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 75; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.0.1.php b/src/RdKafka/FFI/Versions/1.0.1.php index deeed065..f2f90354 100644 --- a/src/RdKafka/FFI/Versions/1.0.1.php +++ b/src/RdKafka/FFI/Versions/1.0.1.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.0.1 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 16777727; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 82; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.1.0.php b/src/RdKafka/FFI/Versions/1.1.0.php index 0bc6c745..35d537ba 100644 --- a/src/RdKafka/FFI/Versions/1.1.0.php +++ b/src/RdKafka/FFI/Versions/1.1.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.1.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 16843007; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 82; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.2.0.php b/src/RdKafka/FFI/Versions/1.2.0.php index d8635a68..a9206475 100644 --- a/src/RdKafka/FFI/Versions/1.2.0.php +++ b/src/RdKafka/FFI/Versions/1.2.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.2.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 16908543; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 82; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.2.1.php b/src/RdKafka/FFI/Versions/1.2.1.php index 1af1db2c..64fcac11 100644 --- a/src/RdKafka/FFI/Versions/1.2.1.php +++ b/src/RdKafka/FFI/Versions/1.2.1.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.2.1 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 16908799; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.2.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 82; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.2.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.2.2.php b/src/RdKafka/FFI/Versions/1.2.2.php index 0a089839..a8465597 100644 --- a/src/RdKafka/FFI/Versions/1.2.2.php +++ b/src/RdKafka/FFI/Versions/1.2.2.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.2.2 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 16909055; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.2.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 82; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.2.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.3.0.php b/src/RdKafka/FFI/Versions/1.3.0.php index ce3d353c..57d09dc8 100644 --- a/src/RdKafka/FFI/Versions/1.3.0.php +++ b/src/RdKafka/FFI/Versions/1.3.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.3.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 16974079; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.3.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 82; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.3.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.4.0.php b/src/RdKafka/FFI/Versions/1.4.0.php index 98906dd3..606cff56 100644 --- a/src/RdKafka/FFI/Versions/1.4.0.php +++ b/src/RdKafka/FFI/Versions/1.4.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.4.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17039615; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 83; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.4.2.php b/src/RdKafka/FFI/Versions/1.4.2.php index 7474a86d..5daccf4f 100644 --- a/src/RdKafka/FFI/Versions/1.4.2.php +++ b/src/RdKafka/FFI/Versions/1.4.2.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.4.2 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17040127; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.4.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 83; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.4.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.4.4.php b/src/RdKafka/FFI/Versions/1.4.4.php index 5dd976f9..e4c0cd8f 100644 --- a/src/RdKafka/FFI/Versions/1.4.4.php +++ b/src/RdKafka/FFI/Versions/1.4.4.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.4.4 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17040639; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.4.4 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 83; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.4.4 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.5.0.php b/src/RdKafka/FFI/Versions/1.5.0.php index d6ac6494..a4645516 100644 --- a/src/RdKafka/FFI/Versions/1.5.0.php +++ b/src/RdKafka/FFI/Versions/1.5.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.5.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17105151; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.5.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 83; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.5.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.5.2.php b/src/RdKafka/FFI/Versions/1.5.2.php index 53a3e411..bb27834b 100644 --- a/src/RdKafka/FFI/Versions/1.5.2.php +++ b/src/RdKafka/FFI/Versions/1.5.2.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.5.2 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17105663; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 89; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.5.3.php b/src/RdKafka/FFI/Versions/1.5.3.php index 0d9f6c03..12e0c0c4 100644 --- a/src/RdKafka/FFI/Versions/1.5.3.php +++ b/src/RdKafka/FFI/Versions/1.5.3.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.5.3 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17105919; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.5.3 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 89; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.5.3 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 6; diff --git a/src/RdKafka/FFI/Versions/1.6.0.php b/src/RdKafka/FFI/Versions/1.6.0.php index 795fc682..b533a24f 100644 --- a/src/RdKafka/FFI/Versions/1.6.0.php +++ b/src/RdKafka/FFI/Versions/1.6.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.6.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17170687; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 9; diff --git a/src/RdKafka/FFI/Versions/1.6.1.php b/src/RdKafka/FFI/Versions/1.6.1.php index b6615be2..492aa8ee 100644 --- a/src/RdKafka/FFI/Versions/1.6.1.php +++ b/src/RdKafka/FFI/Versions/1.6.1.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.6.1 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17170943; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.6.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.6.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 9; diff --git a/src/RdKafka/FFI/Versions/1.6.2.php b/src/RdKafka/FFI/Versions/1.6.2.php index fc74eb1d..9132d59b 100644 --- a/src/RdKafka/FFI/Versions/1.6.2.php +++ b/src/RdKafka/FFI/Versions/1.6.2.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.6.2 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17171199; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.6.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.6.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 9; diff --git a/src/RdKafka/FFI/Versions/1.7.0.php b/src/RdKafka/FFI/Versions/1.7.0.php index 5525d9e5..9e87c422 100644 --- a/src/RdKafka/FFI/Versions/1.7.0.php +++ b/src/RdKafka/FFI/Versions/1.7.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.7.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17236223; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.7.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.7.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 9; diff --git a/src/RdKafka/FFI/Versions/1.8.0.php b/src/RdKafka/FFI/Versions/1.8.0.php index cf095ae4..d8a7cc00 100644 --- a/src/RdKafka/FFI/Versions/1.8.0.php +++ b/src/RdKafka/FFI/Versions/1.8.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.8.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17301759; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.8.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.8.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 9; diff --git a/src/RdKafka/FFI/Versions/1.8.2.php b/src/RdKafka/FFI/Versions/1.8.2.php index b4cf5a93..7f0414fa 100644 --- a/src/RdKafka/FFI/Versions/1.8.2.php +++ b/src/RdKafka/FFI/Versions/1.8.2.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.8.2 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17302271; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.8.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.8.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 9; diff --git a/src/RdKafka/FFI/Versions/1.9.0.php b/src/RdKafka/FFI/Versions/1.9.0.php index 1b0862aa..86369d15 100644 --- a/src/RdKafka/FFI/Versions/1.9.0.php +++ b/src/RdKafka/FFI/Versions/1.9.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.9.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17367295; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 12; diff --git a/src/RdKafka/FFI/Versions/1.9.1.php b/src/RdKafka/FFI/Versions/1.9.1.php index 554bbb70..07f3d6de 100644 --- a/src/RdKafka/FFI/Versions/1.9.1.php +++ b/src/RdKafka/FFI/Versions/1.9.1.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.9.1 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17367551; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.9.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.9.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 12; diff --git a/src/RdKafka/FFI/Versions/1.9.2.php b/src/RdKafka/FFI/Versions/1.9.2.php index 1dfd7ec4..704d8786 100644 --- a/src/RdKafka/FFI/Versions/1.9.2.php +++ b/src/RdKafka/FFI/Versions/1.9.2.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 1.9.2 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 17367807; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 1.9.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 1.9.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 12; diff --git a/src/RdKafka/FFI/Versions/2.0.0.php b/src/RdKafka/FFI/Versions/2.0.0.php index 31d9bd73..76ad39d3 100644 --- a/src/RdKafka/FFI/Versions/2.0.0.php +++ b/src/RdKafka/FFI/Versions/2.0.0.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 2.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 33554687; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 2.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 2.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 16; diff --git a/src/RdKafka/FFI/Versions/2.0.1.php b/src/RdKafka/FFI/Versions/2.0.1.php index eae7f547..c9d811bc 100644 --- a/src/RdKafka/FFI/Versions/2.0.1.php +++ b/src/RdKafka/FFI/Versions/2.0.1.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 2.0.1 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 33554687; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 2.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 2.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 16; diff --git a/src/RdKafka/FFI/Versions/2.0.2.php b/src/RdKafka/FFI/Versions/2.0.2.php index e02ec4b3..4507127d 100644 --- a/src/RdKafka/FFI/Versions/2.0.2.php +++ b/src/RdKafka/FFI/Versions/2.0.2.php @@ -20,13 +20,14 @@ *

    E.g.: 0x000801ff = 0.8.1

    *
    Remarks
    This value should only be used during compile time, for runtime checks of version use rd_kafka_version()
    * @since 2.0.2 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2e242fb8620a32b650a40575bc7f98e */ const RD_KAFKA_VERSION = 33555199; /** - * enum rd_kafka_resp_err_t + *

    Unsupported compression type

    * @since 2.0.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76; @@ -37,8 +38,9 @@ const RD_KAFKA_RESP_ERR_END_ALL = 98; /** - * enum rd_kafka_admin_op_t + *

    Number of ops defined

    * @since 2.0.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP__CNT = 16; diff --git a/src/constants.php b/src/constants.php index 0239472f..68da6309 100644 --- a/src/constants.php +++ b/src/constants.php @@ -22,8 +22,12 @@ // librdkafka overall constants /** - * define + *

    Flags for rd_kafka_destroy_flags()

    + *

    Don't call consumer_close() to leave group and commit final offsets.

    + *

    This also disables consumer callbacks to be called from rd_kafka_destroy*(), such as rebalance_cb.

    + *

    The consumer group handler is still closed internally, but from an application perspective none of the functionality from consumer_close() is performed.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af2a79b2f2bef22e06ed9fade159f42d4 */ const RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE = 8; @@ -31,35 +35,35 @@ *

    Unassigned partition.

    *

    The unassigned partition is used by the producer API for messages that should be partitioned using the configured or default partitioner.

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a3002d1858385de283ea004893e352863 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a3002d1858385de283ea004893e352863 */ const RD_KAFKA_PARTITION_UA = -1; /** - *

    Start consuming from beginning of kafka partition queue: oldest msg

    + *

    Start consuming from beginning of \ kafka partition queue: oldest msg

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a32dc6dd93c16e3aac9b89804c4817fba + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a32dc6dd93c16e3aac9b89804c4817fba */ const RD_KAFKA_OFFSET_BEGINNING = -2; /** - *

    Start consuming from end of kafka partition queue: next msg

    + *

    Start consuming from end of kafka \ partition queue: next msg

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aa7aaaf16e5bd7c0a8a8cb014275c3e06 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa7aaaf16e5bd7c0a8a8cb014275c3e06 */ const RD_KAFKA_OFFSET_END = -1; /** - *

    Start consuming from offset retrieved from offset store

    + *

    Start consuming from offset retrieved \ from offset store

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a727dc7080140da43adbd5d0b170d49be + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a727dc7080140da43adbd5d0b170d49be */ const RD_KAFKA_OFFSET_STORED = -1000; /** *

    Invalid offset

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ac2e48c4fef9e959ab43cad60ade84af1 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac2e48c4fef9e959ab43cad60ade84af1 */ const RD_KAFKA_OFFSET_INVALID = -1001; @@ -71,1324 +75,1418 @@ /** *

    Producer message flags.

    - *

    Delegate freeing of payload to rdkafka.

    + *

    Delegate freeing of payload to rdkafka. \

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a21be13f8a4cb1d5aff01419f333e5ea7 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a21be13f8a4cb1d5aff01419f333e5ea7 */ const RD_KAFKA_MSG_F_FREE = 1; /** - *

    rdkafka will make a copy of the payload.

    + *

    rdkafka will make a copy of the payload. \

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ad7468ab0ece73cc9cb6253a3dcfe702d + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad7468ab0ece73cc9cb6253a3dcfe702d */ const RD_KAFKA_MSG_F_COPY = 2; /** - *

    Block produce*() on message queue full. WARNING: If a delivery report callback is used the application MUST call rd_kafka_poll() (or equiv.) to make sure delivered messages are drained from the internal delivery report queue. Failure to do so will result in indefinately blocking on the produce() call when the message queue is full.

    + *

    Block produce*() on message queue full. \ WARNING: If a delivery report callback \ is used the application MUST \ call rd_kafka_poll() (or equiv.) \ to make sure delivered messages \ are drained from the internal \ delivery report queue. \ Failure to do so will result \ in indefinately blocking on \ the produce() call when the \ message queue is full.

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#aca3cdf1c55668f4aa1c2391ddd39c9c2 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aca3cdf1c55668f4aa1c2391ddd39c9c2 */ const RD_KAFKA_MSG_F_BLOCK = 4; /** - * define + *

    produce_batch() will honor \ per-message partition.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a991bd9378d2fc5b2102ce3a29805b345 */ const RD_KAFKA_MSG_F_PARTITION = 8; /** - * define + *

    Flags for rd_kafka_purge()

    + *

    Purge messages in internal queues.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad5bcdab21e406a23d50cdb1c68e95a34 */ const RD_KAFKA_PURGE_F_QUEUE = 1; /** - * define + *

    Purge messages in-flight to or from the broker. Purging these messages will void any future acknowledgements from the broker, making it impossible for the application to know if these messages were successfully delivered or not. Retrying these messages may lead to duplicates.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af232512144175a21b5bda2a1fcbe1f00 */ const RD_KAFKA_PURGE_F_INFLIGHT = 2; /** - * define + *

    Don't wait for background thread queue purging to finish.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aef197fd7fd6dfa02d70563e359b8281f */ const RD_KAFKA_PURGE_F_NON_BLOCKING = 4; /** - * define + *

    Unset value

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa2707dd1a6225e7649fd5d825284da4d */ const RD_KAFKA_EVENT_NONE = 0; /** *

    Producer Delivery report batch

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#abfe880d05ff52138b26dbe8b8e0d2132 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#abfe880d05ff52138b26dbe8b8e0d2132 */ const RD_KAFKA_EVENT_DR = 1; /** *

    Fetched message (consumer)

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#acfddfd9f3d49591dcd9e7f323dbcd865 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acfddfd9f3d49591dcd9e7f323dbcd865 */ const RD_KAFKA_EVENT_FETCH = 2; /** *

    Log message

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a6265a9eeee57e83eb9f3bbd33d92700f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6265a9eeee57e83eb9f3bbd33d92700f */ const RD_KAFKA_EVENT_LOG = 4; /** *

    Error

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a080a7ad60de643f47424031ee95da103 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a080a7ad60de643f47424031ee95da103 */ const RD_KAFKA_EVENT_ERROR = 8; /** *

    Group rebalance (consumer)

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a271e6a5984932015585dd5248535aa2b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a271e6a5984932015585dd5248535aa2b */ const RD_KAFKA_EVENT_REBALANCE = 16; /** *

    Offset commit result

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a73a29f22b22433a93253a5f77c866437 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a73a29f22b22433a93253a5f77c866437 */ const RD_KAFKA_EVENT_OFFSET_COMMIT = 32; /** - * define + *

    Stats

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a67070a77150f54039273097c57da5965 */ const RD_KAFKA_EVENT_STATS = 64; /** - * define + *

    CreateTopics_result_t

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae0622aab30c391aed6c8b57d1aa5d0a8 */ const RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100; /** - * define + *

    DeleteTopics_result_t

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af69b2889c6e9cc2ebb03c607efade311 */ const RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101; /** - * define + *

    CreatePartitions_result_t

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a82de17d7d0eb7ac80761a35695b1f971 */ const RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT = 102; /** - * define + *

    AlterConfigs_result_t

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a25ded37b0459f4207e033ead15d30909 */ const RD_KAFKA_EVENT_ALTERCONFIGS_RESULT = 103; /** - * define + *

    DescribeConfigs_result_t

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5918e726a9b2828c3051e48d6a747259 */ const RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT = 104; /** *

    Producer client

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ac6f9c3cb01cbaf3013689c4f2731b831 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac6f9c3cb01cbaf3013689c4f2731b831 */ const RD_KAFKA_PRODUCER = 0; /** *

    Consumer client

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ac6f9c3cb01cbaf3013689c4f2731b831 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ac6f9c3cb01cbaf3013689c4f2731b831 */ const RD_KAFKA_CONSUMER = 1; /** *

    Timestamp not available

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#af7cb459a230a61489234823da2beb3f3 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af7cb459a230a61489234823da2beb3f3 */ const RD_KAFKA_TIMESTAMP_NOT_AVAILABLE = 0; /** *

    Message creation time

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#af7cb459a230a61489234823da2beb3f3 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af7cb459a230a61489234823da2beb3f3 */ const RD_KAFKA_TIMESTAMP_CREATE_TIME = 1; /** *

    Log append time

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#af7cb459a230a61489234823da2beb3f3 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af7cb459a230a61489234823da2beb3f3 */ const RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME = 2; /** *

    Begin internal error codes

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__BEGIN = -200; /** *

    Received message is incorrect

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__BAD_MSG = -199; /** *

    Bad/unknown compression

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198; /** *

    Broker is going away

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__DESTROY = -197; /** *

    Generic failure

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__FAIL = -196; /** *

    Broker transport failure

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__TRANSPORT = -195; /** *

    Critical system resource

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194; /** *

    Failed to resolve broker

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__RESOLVE = -193; /** *

    Produced message timed out

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192; /** - *

    /**
 * Reached the end of the topic+partition queue on the broker. Not really an error. This event is disabled by default, see the enable.partition.eof configuration property.
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__PARTITION_EOF = -191;
/**
 * Permanent: Partition does not exist in cluster.
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190;
/**
 * File or filesystem error
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__FS = -189;
/**
 * Permanent: Topic does not exist in cluster.
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188;
/**
 * All broker connections are down.
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187;
/**
 * Invalid argument, or invalid configuration
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__INVALID_ARG = -186;
/**
 * Operation timed out
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__TIMED_OUT = -185;
/**
 * Queue is full
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__QUEUE_FULL = -184;
/**
 * ISR count < required.acks
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__ISR_INSUFF = -183;
/**
 * Broker node update
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__NODE_UPDATE = -182;
/**

    /**
 * SSL error
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__SSL = -181;
/**
 * Waiting for coordinator to become available.
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__WAIT_COORD = -180;
/**
 * Unknown client group
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179;
/**
 * Operation in progress
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__IN_PROGRESS = -178;
/**
 * Previous operation in progress, wait for it to finish.
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177;
/**
 * This operation would interfere with an existing subscription
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176;
/**
 * Assigned partitions (rebalance_cb)
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175;
/**
 * Revoked partitions (rebalance_cb)
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174;
/**
 * Conflicting use
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__CONFLICT = -173;
/**
 * Wrong state
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__STATE = -172;
/**
 * Unknown protocol
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171;
/**

    /**
 * Not implemented
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170;
/**
 * Authentication failure
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__AUTHENTICATION = -169;
/**
 * No stored offset
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__NO_OFFSET = -168;
/**
 * Outdated
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__OUTDATED = -167;
/**
 * Timed out in queue
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166;
/**
 * Feature not supported by broker
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165;
/**
 * Awaiting cache update
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__WAIT_CACHE = -164;
/**
 * Operation interrupted (e.g., due to yield))
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__INTR = -163;
/**
 * Key serialization error
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162;
/**
 * Value serialization error
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161;
/**
 * Key deserialization error
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160;
/**

    /**
 * Value deserialization error
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159;
/**
 * Partial response
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__PARTIAL = -158;
/**
 * Modification attempted on read-only object
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__READ_ONLY = -157;
/**
 * No such entry / item not found
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__NOENT = -156;
/**
 * Read underflow
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__UNDERFLOW = -155;
/**
 * Invalid type
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__INVALID_TYPE = -154;
/**
 * Retry operation
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__RETRY = -153;
/**
 * Purged in queue
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__PURGE_QUEUE = -152;
/**
 * Purged in flight
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = -151;
/**
 * Fatal error: see rd_kafka_fatal_error()
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__FATAL = -150;
/**
 * Inconsistent state
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__INCONSISTENT = -149;
/**

    /**
 * Gap-less ordering would not be guaranteed if proceeding
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148;
/**
 * Maximum poll interval exceeded
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147;
/**
 * End internal error codes
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR__END = -100;
/**
 * Unknown broker error
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_UNKNOWN = -1;
/**
 * Success
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_NO_ERROR = 0;
/**
 * Offset out of range
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1;
/**
 * Invalid message
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_MSG = 2;
/**
 * Unknown topic or partition
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3;
/**
 * Invalid message size
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4;
/**
 * Leader not available
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5;
/**
 * Not leader for partition
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6;
/**

    /**
 * Request timed out
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7;
/**
 * Broker not available
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8;
/**
 * Replica not available
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9;
/**
 * Message size too large
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10;
/**
 * StaleControllerEpochCode
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11;
/**
 * Offset metadata string too large
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12;
/**
 * Broker disconnected before response received
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13;
/**
 * Group coordinator load in progress
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afa695eee077bfd031e5bb637b899cfd8
 */
const RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14;
/**
 * Group coordinator not available
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a23a154df5190e1fc072199737f39bd87
 */
const RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15;
/**
 * Not coordinator for group
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7755c1901da130a937be67a1c66e2c1f
 */
const RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16;
/**
 * Invalid topic
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17;
/**

    /**
 * Message batch larger than configured server segment size
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18;
/**
 * Not enough in-sync replicas
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19;
/**
 * Message(s) written to insufficient number of in-sync replicas
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20;
/**
 * Invalid required acks value
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21;
/**
 * Specified group generation id is not valid
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22;
/**
 * Inconsistent group protocol
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23;
/**
 * Invalid group.id
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24;
/**
 * Unknown member
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25;
/**
 * Invalid session timeout
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26;
/**
 * Group rebalance in progress
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27;
/**
 * Commit offset data size is not valid
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28;
/**

    /**
 * Topic authorization failed
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29;
/**
 * Group authorization failed
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30;
/**
 * Cluster authorization failed
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31;
/**
 * Invalid timestamp
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32;
/**
 * Unsupported SASL mechanism
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33;
/**
 * Illegal SASL state
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34;
/**
 * Unuspported version
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35;
/**
 * Topic already exists
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36;
/**
 * Invalid number of partitions
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37;
/**
 * Invalid replication factor
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38;
/**
 * Invalid replica assignment
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39;
/**

    /**
 * Invalid config
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40;
/**
 * Not controller for cluster
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41;
/**
 * Invalid request
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42;
/**
 * Message format on broker does not support request
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43;
/**
 * Policy violation
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44;
/**
 * Broker received an out of order sequence number
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45;
/**
 * Broker received a duplicate sequence number
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46;
/**
 * Producer attempted an operation with an old epoch
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47;
/**
 * Producer attempted a transactional operation in an invalid state
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48;
/**
 * Producer attempted to use a producer id which is not currently assigned to its transactional id
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49;
/**
 * Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
 * @since 1.0.0 of librdkafka
 * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb
 */
const RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50;
/**

    Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51; /** - * enum rd_kafka_resp_err_t + *

    Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52; /** - * enum rd_kafka_resp_err_t + *

    Transactional Id authorization failed

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53; /** - * enum rd_kafka_resp_err_t + *

    Security features are disabled

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54; /** - * enum rd_kafka_resp_err_t + *

    Operation not attempted

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55; /** - * enum rd_kafka_resp_err_t + *

    Disk error when trying to access log file on the disk

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56; /** - * enum rd_kafka_resp_err_t + *

    The user-specified log directory is not found in the broker config

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57; /** - * enum rd_kafka_resp_err_t + *

    SASL Authentication failed

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58; /** - * enum rd_kafka_resp_err_t + *

    Unknown Producer Id

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59; /** - * enum rd_kafka_resp_err_t + *

    Partition reassignment is in progress

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60; /** - * enum rd_kafka_resp_err_t + *

    Delegation Token feature is not enabled

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61; /** - * enum rd_kafka_resp_err_t + *

    Delegation Token is not found on server

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62; /** - * enum rd_kafka_resp_err_t + *

    Specified Principal is not valid Owner/Renewer

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63; /** - * enum rd_kafka_resp_err_t + *

    Delegation Token requests are not allowed on this connection

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64; /** - * enum rd_kafka_resp_err_t + *

    Delegation Token authorization failed

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65; /** - * enum rd_kafka_resp_err_t + *

    Delegation Token is expired

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66; /** - * enum rd_kafka_resp_err_t + *

    Supplied principalType is not supported

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67; /** - * enum rd_kafka_resp_err_t + *

    The group is not empty

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68; /** - * enum rd_kafka_resp_err_t + *

    The group id does not exist

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69; /** - * enum rd_kafka_resp_err_t + *

    The fetch session ID was not found

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70; /** - * enum rd_kafka_resp_err_t + *

    The fetch session epoch is invalid

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71; /** - * enum rd_kafka_resp_err_t + *

    No matching listener

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72; /** - * enum rd_kafka_resp_err_t + *

    Topic deletion is disabled

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73; /** *

    va-arg sentinel

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a03c74ceba678b4e7a624310160a02165 - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03c74ceba678b4e7a624310160a02165 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_END = 0; /** *

    (const char *) Topic name

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_TOPIC = 1; /** *

    (rd_kafka_topic_t *) Topic handle

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_RKT = 2; /** *

    (int32_t) Partition

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_PARTITION = 3; /** *

    (void *, size_t) Message value (payload)

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_VALUE = 4; /** *

    (void *, size_t) Message key

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_KEY = 5; /** - *

    (void *) Application opaque

    + *

    (void *) Per-message application opaque value. This is the same as the _private field in rd_kafka_message_t, also known as the msg_opaque.

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_OPAQUE = 6; /** *

    (int) RD_KAFKA_MSG_F_.. flags

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_MSGFLAGS = 7; /** *

    (int64_t) Milliseconds since epoch UTC

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_TIMESTAMP = 8; /** - * enum rd_kafka_vtype_t + *

    (const char *, const void *, ssize_t) Message Header

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_HEADER = 9; /** - * enum rd_kafka_vtype_t + *

    (rd_kafka_headers_t *) Headers list

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a9aac65afa4c30e6d75550e39f6c1ea6b */ const RD_KAFKA_VTYPE_HEADERS = 10; /** - * enum rd_kafka_msg_status_t + *

    Message was never transmitted to the broker, or failed with an error indicating it was not written to the log. Application retry risks ordering, but not duplication.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad46cb2b6064fcfbe2451aca8df802517 */ const RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0; /** - * enum rd_kafka_msg_status_t + *

    Message was transmitted to broker, but no acknowledgement was received. Application retry risks ordering and duplication.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad46cb2b6064fcfbe2451aca8df802517 */ const RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1; /** - * enum rd_kafka_msg_status_t + *

    Message was written to the log and acknowledged by the broker. No reason for application to retry. Note: this value should only be trusted with acks=all.

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad46cb2b6064fcfbe2451aca8df802517 */ const RD_KAFKA_MSG_STATUS_PERSISTED = 2; /** *

    Unknown configuration name.

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ad8306a08e59e8e2cbc6abdb84f9689f4 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad8306a08e59e8e2cbc6abdb84f9689f4 */ const RD_KAFKA_CONF_UNKNOWN = -2; /** - *

    Invalid configuration value.

    + *

    Invalid configuration value or property or value not supported in this build.

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ad8306a08e59e8e2cbc6abdb84f9689f4 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad8306a08e59e8e2cbc6abdb84f9689f4 */ const RD_KAFKA_CONF_INVALID = -1; /** *

    Configuration okay

    * @since 1.0.0 of librdkafka - * @link https://docs.confluent.io/3.2.1/clients/librdkafka/rdkafka_8h.html#ad8306a08e59e8e2cbc6abdb84f9689f4 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad8306a08e59e8e2cbc6abdb84f9689f4 */ const RD_KAFKA_CONF_OK = 0; /** - * enum rd_kafka_admin_op_t + *

    Default value

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_ANY = 0; /** - * enum rd_kafka_admin_op_t + *

    CreateTopics

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_CREATETOPICS = 1; /** - * enum rd_kafka_admin_op_t + *

    DeleteTopics

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_DELETETOPICS = 2; /** - * enum rd_kafka_admin_op_t + *

    CreatePartitions

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_CREATEPARTITIONS = 3; /** - * enum rd_kafka_admin_op_t + *

    AlterConfigs

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_ALTERCONFIGS = 4; /** - * enum rd_kafka_admin_op_t + *

    DescribeConfigs

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS = 5; /** - * enum rd_kafka_ConfigSource_t + *

    Source unknown, e.g., in the ConfigEntry used for alter requests where source is not set

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aab84b4178e74bd4319721ee0c68ae62c */ const RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0; /** - * enum rd_kafka_ConfigSource_t + *

    Dynamic topic config that is configured for a specific topic

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aab84b4178e74bd4319721ee0c68ae62c */ const RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1; /** - * enum rd_kafka_ConfigSource_t + *

    Dynamic broker config that is configured for a specific broker

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aab84b4178e74bd4319721ee0c68ae62c */ const RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2; /** - * enum rd_kafka_ConfigSource_t + *

    Dynamic broker config that is configured as default for all brokers in the cluster

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aab84b4178e74bd4319721ee0c68ae62c */ const RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3; /** - * enum rd_kafka_ConfigSource_t + *

    Static broker config provided as broker properties at startup (e.g. from server.properties file)

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aab84b4178e74bd4319721ee0c68ae62c */ const RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4; /** - * enum rd_kafka_ConfigSource_t + *

    Built-in default configuration for configs that have a default value

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aab84b4178e74bd4319721ee0c68ae62c */ const RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5; /** - * enum rd_kafka_ConfigSource_t + *

    Number of source types defined

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aab84b4178e74bd4319721ee0c68ae62c */ const RD_KAFKA_CONFIG_SOURCE__CNT = 6; /** - * enum rd_kafka_ResourceType_t + *

    Unknown

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acca83a66d7783ff7683ed2691cc907c9 */ const RD_KAFKA_RESOURCE_UNKNOWN = 0; /** - * enum rd_kafka_ResourceType_t + *

    Any (used for lookups)

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acca83a66d7783ff7683ed2691cc907c9 */ const RD_KAFKA_RESOURCE_ANY = 1; /** - * enum rd_kafka_ResourceType_t + *

    Topic

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acca83a66d7783ff7683ed2691cc907c9 */ const RD_KAFKA_RESOURCE_TOPIC = 2; /** - * enum rd_kafka_ResourceType_t + *

    Group

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acca83a66d7783ff7683ed2691cc907c9 */ const RD_KAFKA_RESOURCE_GROUP = 3; /** - * enum rd_kafka_ResourceType_t + *

    Broker

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acca83a66d7783ff7683ed2691cc907c9 */ const RD_KAFKA_RESOURCE_BROKER = 4; /** - * enum rd_kafka_ResourceType_t + *

    Number of resource types defined

    * @since 1.0.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acca83a66d7783ff7683ed2691cc907c9 */ const RD_KAFKA_RESOURCE__CNT = 5; /** - * enum rd_kafka_resp_err_t + *

    Leader epoch is older than broker epoch

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74; /** - * enum rd_kafka_resp_err_t + *

    Leader epoch is newer than broker epoch

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75; /** - * enum rd_kafka_resp_err_t + *

    Broker epoch has changed

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77; /** - * enum rd_kafka_resp_err_t + *

    Leader high watermark is not caught up

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78; /** - * enum rd_kafka_resp_err_t + *

    Group member needs a valid member ID

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79; /** - * enum rd_kafka_resp_err_t + *

    Preferred leader was not available

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80; /** - * enum rd_kafka_resp_err_t + *

    Consumer group has reached maximum size

    * @since 1.0.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81; /** - * define + *

    SASL/OAUTHBEARER token needs to be refreshed

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a63e097de934fac48b93411ac91564db1 */ const RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH = 256; /** - * enum rd_kafka_cert_type_t + *

    Client's public key

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5f0ba54591c0ffa725cb1a5eafe8b180 */ const RD_KAFKA_CERT_PUBLIC_KEY = 0; /** - * enum rd_kafka_cert_type_t + *

    Client's private key

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5f0ba54591c0ffa725cb1a5eafe8b180 */ const RD_KAFKA_CERT_PRIVATE_KEY = 1; /** - * enum rd_kafka_cert_type_t + *

    CA certificate

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a5f0ba54591c0ffa725cb1a5eafe8b180 */ const RD_KAFKA_CERT_CA = 2; @@ -1399,20 +1497,23 @@ const RD_KAFKA_CERT__CNT = 3; /** - * enum rd_kafka_cert_enc_t + *

    PKCS#12

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab7a624d3a484453f4abe5955bb6a45eb */ const RD_KAFKA_CERT_ENC_PKCS12 = 0; /** - * enum rd_kafka_cert_enc_t + *

    DER / binary X.509 ASN1

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab7a624d3a484453f4abe5955bb6a45eb */ const RD_KAFKA_CERT_ENC_DER = 1; /** - * enum rd_kafka_cert_enc_t + *

    PEM

    * @since 1.1.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab7a624d3a484453f4abe5955bb6a45eb */ const RD_KAFKA_CERT_ENC_PEM = 2; @@ -1423,284 +1524,334 @@ const RD_KAFKA_CERT_ENC__CNT = 3; /** - * enum rd_kafka_thread_type_t + *

    librdkafka's internal main thread

    * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab327ca8929b6895de7b300970ad59745 */ const RD_KAFKA_THREAD_MAIN = 0; /** - * enum rd_kafka_thread_type_t + *

    Background thread (if enabled)

    * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab327ca8929b6895de7b300970ad59745 */ const RD_KAFKA_THREAD_BACKGROUND = 1; /** - * enum rd_kafka_thread_type_t + *

    Per-broker thread

    * @since 1.2.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab327ca8929b6895de7b300970ad59745 */ const RD_KAFKA_THREAD_BROKER = 2; /** - * enum rd_kafka_resp_err_t + *

    Unknown broker

    * @since 1.3.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146; /** - * enum rd_kafka_resp_err_t + *

    Coordinator load in progress

    * @since 1.3.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#afa695eee077bfd031e5bb637b899cfd8 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14; /** - * enum rd_kafka_resp_err_t + *

    Coordinator not available

    * @since 1.3.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a23a154df5190e1fc072199737f39bd87 + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15; /** - * enum rd_kafka_resp_err_t + *

    Not coordinator

    * @since 1.3.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a7755c1901da130a937be67a1c66e2c1f + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16; /** - * enum rd_kafka_resp_err_t + *

    Functionality not configured

    * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145; /** - * enum rd_kafka_resp_err_t + *

    Instance has been fenced

    * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__FENCED = -144; /** - * enum rd_kafka_resp_err_t + *

    Application generated error

    * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__APPLICATION = -143; /** - * enum rd_kafka_resp_err_t + *

    Static consumer fenced by other consumer with same group.instance.id.

    * @since 1.4.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82; /** - * enum rd_kafka_resp_err_t + *

    Eligible partition leaders are not available

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83; /** - * enum rd_kafka_resp_err_t + *

    Leader election not needed for topic partition

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84; /** - * enum rd_kafka_resp_err_t + *

    No partition reassignment is in progress

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85; /** - * enum rd_kafka_resp_err_t + *

    Deleting offsets of a topic while the consumer group is subscribed to it

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86; /** - * enum rd_kafka_resp_err_t + *

    Broker failed to validate record

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_INVALID_RECORD = 87; /** - * enum rd_kafka_resp_err_t + *

    There are unstable offsets that need to be cleared

    * @since 1.5.2 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88; /** - * define + *

    DeleteRecords_result_t

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab572fd50785847b03c96af744ceffaf1 */ const RD_KAFKA_EVENT_DELETERECORDS_RESULT = 105; /** - * define + *

    DeleteGroups_result_t

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a6d2f3f1797ba2241c707b077ddc2c1b3 */ const RD_KAFKA_EVENT_DELETEGROUPS_RESULT = 106; /** - * define + *

    DeleteConsumerGroupOffsets_result_t

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a16dee39b23bef225ad3bb902cf391659 */ const RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT = 107; /** - * enum rd_kafka_resp_err_t + *

    Assignment lost

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142; /** - * enum rd_kafka_resp_err_t + *

    No operation performed

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__NOOP = -141; /** - * enum rd_kafka_resp_err_t + *

    Throttling quota has been exceeded

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89; /** - * enum rd_kafka_resp_err_t + *

    There is a newer producer with the same transactionalId which fences the current one

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90; /** - * enum rd_kafka_resp_err_t + *

    Request illegally referred to resource that does not exist

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91; /** - * enum rd_kafka_resp_err_t + *

    Request illegally referred to the same resource twice

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92; /** - * enum rd_kafka_resp_err_t + *

    Requested credential would not meet criteria for acceptability

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93; /** - * enum rd_kafka_resp_err_t + *

    Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94; /** - * enum rd_kafka_resp_err_t + *

    Invalid update version

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95; /** - * enum rd_kafka_resp_err_t + *

    Unable to update finalized features due to server error

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96; /** - * enum rd_kafka_resp_err_t + *

    Request principal deserialization failed during forwarding

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97; /** - * enum rd_kafka_admin_op_t + *

    DeleteRecords

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_DELETERECORDS = 6; /** - * enum rd_kafka_admin_op_t + *

    DeleteGroups

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_DELETEGROUPS = 7; /** - * enum rd_kafka_admin_op_t + *

    DeleteConsumerGroupOffsets

    * @since 1.6.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS = 8; /** - * enum rd_kafka_resp_err_t + *

    No offset to automatically reset to

    * @since 1.6.1 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a03509bab51072c72a8dcf52337e6d5cb */ const RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140; /** - * define + *

    Enable background thread.

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ad6fc906cff51d7ed8bfc9597c2054190 */ const RD_KAFKA_EVENT_BACKGROUND = 512; /** - * define + *

    CreateAcls_result_t

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ae09849fc4a1e0a1b4240fbf9e195f2b9 */ const RD_KAFKA_EVENT_CREATEACLS_RESULT = 1024; /** - * define + *

    DescribeAcls_result_t

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#af4991ffb7e86a8344a7012d40e2124d8 */ const RD_KAFKA_EVENT_DESCRIBEACLS_RESULT = 2048; /** - * define + *

    DeleteAcls_result_t

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#aa44fa18142c943e305b48ae67c836cb4 */ const RD_KAFKA_EVENT_DELETEACLS_RESULT = 4096; /** - * enum rd_kafka_admin_op_t + *

    CreateAcls

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_CREATEACLS = 9; /** - * enum rd_kafka_admin_op_t + *

    DescribeAcls

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_DESCRIBEACLS = 10; /** - * enum rd_kafka_admin_op_t + *

    DeleteAcls

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a8041b7c45068283d95f54ee14c7362fe */ const RD_KAFKA_ADMIN_OP_DELETEACLS = 11; /** - * enum rd_kafka_ResourcePatternType_t + *

    Unknown

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acbe0ed4347fe1bd085eef03ca7aadbdb */ const RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0; /** - * enum rd_kafka_ResourcePatternType_t + *

    Any (used for lookups)

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acbe0ed4347fe1bd085eef03ca7aadbdb */ const RD_KAFKA_RESOURCE_PATTERN_ANY = 1; /** - * enum rd_kafka_ResourcePatternType_t + *

    Match: will perform pattern matching

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acbe0ed4347fe1bd085eef03ca7aadbdb */ const RD_KAFKA_RESOURCE_PATTERN_MATCH = 2; /** - * enum rd_kafka_ResourcePatternType_t + *

    Literal: A literal resource name

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acbe0ed4347fe1bd085eef03ca7aadbdb */ const RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3; /** - * enum rd_kafka_ResourcePatternType_t + *

    Prefixed: A prefixed resource name

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#acbe0ed4347fe1bd085eef03ca7aadbdb */ const RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4; @@ -1711,80 +1862,93 @@ const RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT = 5; /** - * enum rd_kafka_AclOperation_t + *

    Unknown

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_UNKNOWN = 0; /** - * enum rd_kafka_AclOperation_t + *

    In a filter, matches any AclOperation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_ANY = 1; /** - * enum rd_kafka_AclOperation_t + *

    ALL operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_ALL = 2; /** - * enum rd_kafka_AclOperation_t + *

    READ operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_READ = 3; /** - * enum rd_kafka_AclOperation_t + *

    WRITE operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_WRITE = 4; /** - * enum rd_kafka_AclOperation_t + *

    CREATE operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_CREATE = 5; /** - * enum rd_kafka_AclOperation_t + *

    DELETE operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_DELETE = 6; /** - * enum rd_kafka_AclOperation_t + *

    ALTER operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_ALTER = 7; /** - * enum rd_kafka_AclOperation_t + *

    DESCRIBE operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_DESCRIBE = 8; /** - * enum rd_kafka_AclOperation_t + *

    CLUSTER_ACTION operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9; /** - * enum rd_kafka_AclOperation_t + *

    DESCRIBE_CONFIGS operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10; /** - * enum rd_kafka_AclOperation_t + *

    ALTER_CONFIGS operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11; /** - * enum rd_kafka_AclOperation_t + *

    IDEMPOTENT_WRITE operation

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#a526269d7fc5c9acd1b8b56e84895c2bb */ const RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12; @@ -1795,26 +1959,30 @@ const RD_KAFKA_ACL_OPERATION__CNT = 13; /** - * enum rd_kafka_AclPermissionType_t + *

    Unknown

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab2da1706fced5ccc7ac5566825ba44b8 */ const RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0; /** - * enum rd_kafka_AclPermissionType_t + *

    In a filter, matches any AclPermissionType

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab2da1706fced5ccc7ac5566825ba44b8 */ const RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1; /** - * enum rd_kafka_AclPermissionType_t + *

    Disallows access

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab2da1706fced5ccc7ac5566825ba44b8 */ const RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2; /** - * enum rd_kafka_AclPermissionType_t + *

    Grants access.

    * @since 1.9.0 of librdkafka + * @link https://docs.confluent.io/platform/current/clients/librdkafka/html/rdkafka_8h.html#ab2da1706fced5ccc7ac5566825ba44b8 */ const RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3;