diff -Nru librdkafka-1.9.2/.appveyor.yml librdkafka-2.0.2/.appveyor.yml --- librdkafka-1.9.2/.appveyor.yml 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -version: 1.9.1-R-post{build} -pull_requests: - do_not_increment_build_number: true -image: Visual Studio 2019 -configuration: Release -environment: - runtime: v142 - matrix: - - platform: x64 - arch: x64 - - platform: win32 - arch: x86 -install: -- ps: "& .\\win32\\install-coapp.ps1" - # Update vcpkg (is outdated on the VS 2015 image) -- cmd: | - cd "C:\Tools\vcpkg" - git pull -q - .\bootstrap-vcpkg.bat - cd %appveyor_build_folder% -cache: - - c:\tools\vcpkg\installed - - C:\Users\appveyor\AppData\Local\vcpkg\archives - - C:\Users\appveyor\AppData\Local\vcpkg\installed -nuget: - account_feed: true - project_feed: true - disable_publish_on_pr: true -before_build: - - cmd: vcpkg --feature-flags=versions install --triplet %arch%-windows -build: - project: win32/librdkafka.sln - publish_nuget: true - publish_nuget_symbols: true - include_nuget_references: true - parallel: true - verbosity: normal -test_script: -- cmd: cd tests && ..\win32\outdir\%runtime%\%PLATFORM%\%CONFIGURATION%\tests.exe -l -Q -p1 && cd .. -artifacts: -- path: test_report*.json - name: Test report -- path: '*.nupkg' - name: Packages -- path: '**\*.dll' - name: Libraries -- path: '**\*.lib' - name: Libraries -- path: '**\*.pdb' - name: Libraries -- path: '**\*.exe' - name: Executables -#before_deploy: -after_test: -- ps: >- - # FIXME: Add to Deployment condition above: - - # APPVEYOR_REPO_TAG = true - - - - # This is the CoApp .autopkg file to create. - - $autopkgFile = "win32/librdkafka.autopkg" - - pwd - - - ls $autopkgFile - - - - # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file. - - cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile - - - # Use the CoApp tools to create NuGet native packages from the .autopkg. - - Write-NuGetPackage $autopkgFile - - - # Push all newly created .nupkg files as Appveyor artifacts for later deployment. - - Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name } -deploy: -- provider: S3 - access_key_id: - secure: 3SmFFB3J1WWjLqxouvH8zLdcmrFNVHHbkROb+2BBVJE= - secret_access_key: - secure: VT0D5uzlaJI6gfZbemKCnf0MMh6qnlcmioVADK0oCkW6syz+n17VzWScRjvAifPm - region: us-west-1 - bucket: librdkafka-ci-packages - folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID) - artifact: /.*\.(nupkg)/ - max_error_retry: 3 - on: - APPVEYOR_REPO_TAG: true -notifications: -- provider: Email - to: - - magnus@edenhill.se - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff -Nru librdkafka-1.9.2/CHANGELOG.md librdkafka-2.0.2/CHANGELOG.md --- librdkafka-1.9.2/CHANGELOG.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/CHANGELOG.md 2023-01-20 09:14:36.000000000 +0000 @@ -1,3 +1,156 @@ +# librdkafka v2.0.2 + +librdkafka v2.0.2 is a bugfix release: + +* Fix OpenSSL version in Win32 nuget package (#4152). + + + +# librdkafka v2.0.1 + +librdkafka v2.0.1 is a bugfix release: + +* Fixed nuget package for Linux ARM64 release (#4150). 
+ + + +# librdkafka v2.0.0 + +librdkafka v2.0.0 is a feature release: + + * [KIP-88](https://cwiki.apache.org/confluence/display/KAFKA/KIP-88%3A+OffsetFetch+Protocol+Update) + OffsetFetch Protocol Update (#3995). + * [KIP-222](https://cwiki.apache.org/confluence/display/KAFKA/KIP-222+-+Add+Consumer+Group+operations+to+Admin+API) + Add Consumer Group operations to Admin API (started by @lesterfan, #3995). + * [KIP-518](https://cwiki.apache.org/confluence/display/KAFKA/KIP-518%3A+Allow+listing+consumer+groups+per+state) + Allow listing consumer groups per state (#3995). + * [KIP-396](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=97551484) + Partially implemented: support for AlterConsumerGroupOffsets + (started by @lesterfan, #3995). + * OpenSSL 3.0.x support - the maximum bundled OpenSSL version is now 3.0.7 (previously 1.1.1q). + * Fixes to the transactional and idempotent producer. + + +## Upgrade considerations + +### OpenSSL 3.0.x + +#### OpenSSL default ciphers + +The introduction of OpenSSL 3.0.x in the self-contained librdkafka bundles +changes the default set of available ciphers: in particular, all obsolete +or insecure ciphers and algorithms listed in the +OpenSSL [legacy](https://www.openssl.org/docs/man3.0/man7/OSSL_PROVIDER-legacy.html) +manual page are now disabled by default. + +**WARNING**: These ciphers are disabled for security reasons and it is +highly recommended NOT to use them. + +Should you need to use any of these old ciphers you'll need to explicitly +enable the `legacy` provider by configuring `ssl.providers=default,legacy` +on the librdkafka client (a configuration sketch follows this changelog entry). + +#### OpenSSL engines and providers + +OpenSSL 3.0.x deprecates the use of engines, which are being replaced by +providers. As such librdkafka will emit a deprecation warning if +`ssl.engine.location` is configured. + +OpenSSL providers may be configured with the new `ssl.providers` +configuration property. + +### Broker TLS certificate hostname verification + +The default value for `ssl.endpoint.identification.algorithm` has been +changed from `none` (no hostname verification) to `https`, which enables +broker hostname verification (to counter man-in-the-middle +impersonation attacks) by default. + +To restore the previous behaviour, set `ssl.endpoint.identification.algorithm` to `none`. + +## Known Issues + +### Poor Consumer batch API messaging guarantees + +The Consumer Batch APIs `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` +are not thread safe if `rkmessages_size` is greater than 1 and any of the **seek**, +**pause**, **resume** or **rebalancing** operations is performed in parallel with any of +the above APIs. Some of the messages might be lost, or erroneously returned to the +application, in the above scenario. + +It is strongly recommended to use the Consumer Batch APIs and the mentioned +operations in sequential order to get consistent results. + +For the **rebalancing** operation to work in a sequential manner, set the `rebalance_cb` +configuration property for the consumer (refer to +[examples/rdkafka_complex_consumer_example.c](examples/rdkafka_complex_consumer_example.c) +for usage help; a minimal callback sketch also follows this changelog entry). + +## Enhancements + + * Self-contained static libraries can now be built on Linux arm64 (#4005). + * Updated to zlib 1.2.13, zstd 1.5.2, and curl 7.86.0 in self-contained + librdkafka bundles. + * Added `on_broker_state_change()` interceptor. + * The C++ API no longer returns strings by const value, which enables better move optimization in callers.
+ * Added `rd_kafka_sasl_set_credentials()` API to update SASL credentials. + * Setting `allow.auto.create.topics` will no longer give a warning if used by a producer, since that is an expected use case. + The documentation for this property has also been improved. + * Added a `resolve_cb` configuration setting that permits using custom DNS resolution logic. + * Added `rd_kafka_mock_broker_error_stack_cnt()`. + * The librdkafka.redist NuGet package has been updated to have fewer external + dependencies for its bundled librdkafka builds, as everything but cyrus-sasl + is now built-in. There are bundled builds with and without linking to + cyrus-sasl for maximum compatibility. + * Admin API DescribeGroups() now provides the group instance id + for static members [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances) (#3995). + + +## Fixes + +### General fixes + + * Windows: couldn't read a PKCS#12 keystore correctly because binary mode + wasn't explicitly set and Windows defaults to text mode. + * Fixed memory leak when loading SSL certificates (@Mekk, #3930). + * Load all CA certificates from `ssl.ca.pem`, not just the first one. + * Each HTTP request made when using OAUTHBEARER OIDC would leak a small + amount of memory. + +### Transactional producer fixes + + * When a PID epoch bump is requested and the producer is waiting + to reconnect to the transaction coordinator, a failure in a find coordinator + request could cause an assert to fail. This is fixed by retrying when the + coordinator is known (#4020). + * Transactional APIs (except `send_offsets_to_transaction()`) that + time out due to a low timeout_ms may now be resumed by calling the same API + again, as the operation continues in the background. + * For fatal idempotent producer errors that may be recovered by bumping the + epoch, the current transaction must first be aborted prior to the epoch bump. + This is now handled correctly, which fixes issues seen with fenced + transactional producers on fatal idempotency errors. + * Timeouts for EndTxn requests (transaction commits and aborts) are now + automatically retried and the error raised to the application is also + a retriable error. + * TxnOffsetCommitRequests were retried immediately upon temporary errors in + `send_offsets_to_transaction()`, causing excessive network requests. + These retries are now delayed 500ms. + * If `init_transactions()` is called with an infinite timeout (-1), + the timeout will be limited to 2 * `transaction.timeout.ms`. + The application may retry and resume the call if a retriable error is + returned. + + +### Consumer fixes + + * Back off and retry the JoinGroup request if coordinator load is in progress. + * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` skipping + other partitions' offsets intermittently when **seek**, **pause**, **resume** + or **rebalancing** is used for a partition. + * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` + intermittently returning incorrect partitions' messages if **rebalancing** + happens during these operations.
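The two OpenSSL-related upgrade considerations above come down to two client configuration properties. Below is a minimal, hedged sketch of how an application that still depends on the old behaviour might set them with `rd_kafka_conf_set()`; both settings weaken security and are only meant as a stop-gap.

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch only: restore pre-2.0.0 TLS behaviour on a v2.0.x client. */
static rd_kafka_conf_t *make_legacy_tls_conf(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        /* Load the OpenSSL 3.0.x "legacy" provider alongside "default" so
         * the obsolete ciphers become available again (discouraged). */
        if (rd_kafka_conf_set(conf, "ssl.providers", "default,legacy",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "%% ssl.providers: %s\n", errstr);

        /* Opt back out of broker hostname verification; the new default
         * is "https" (verification enabled). */
        if (rd_kafka_conf_set(conf, "ssl.endpoint.identification.algorithm",
                              "none", errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK)
                fprintf(stderr,
                        "%% ssl.endpoint.identification.algorithm: %s\n",
                        errstr);

        return conf;
}
```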
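For the batch-consume known issue above, the mitigation is to let the application serve rebalances itself via `rebalance_cb`, so assignment changes are applied from the same loop that calls the batch APIs rather than in parallel with them. A minimal sketch following the pattern of examples/rdkafka_complex_consumer_example.c, assuming the default (EAGER) partition assignors; a cooperative assignor would use `rd_kafka_incremental_assign()`/`rd_kafka_incremental_unassign()` instead.

```c
#include <librdkafka/rdkafka.h>

/* Sketch: application-controlled rebalance handling. */
static void rebalance_cb(rd_kafka_t *rk,
                         rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *partitions,
                         void *opaque) {
        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
                rd_kafka_assign(rk, partitions);
        else /* revocation or error: drop the current assignment */
                rd_kafka_assign(rk, NULL);
}

/* Register the callback on the configuration object before creating the
 * consumer that will call rd_kafka_consume_batch()/_batch_queue(). */
static void setup_rebalance_cb(rd_kafka_conf_t *conf) {
        rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
}
```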
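The new `rd_kafka_sasl_set_credentials()` enhancement targets credential rotation: updating the SASL username and password on a live client so that future broker connections authenticate with the new secret. A short sketch, assuming the v2.0.0 signature (client handle, username, password) returning an `rd_kafka_error_t *`; the wrapper name is ours.

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: rotate SASL PLAIN/SCRAM credentials without recreating the client. */
static int rotate_sasl_credentials(rd_kafka_t *rk,
                                   const char *username,
                                   const char *password) {
        rd_kafka_error_t *error =
            rd_kafka_sasl_set_credentials(rk, username, password);

        if (error) {
                fprintf(stderr, "%% Credential update failed: %s\n",
                        rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);
                return -1;
        }
        return 0;
}
```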
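The transactional fixes above also change how timeouts surface: a call that times out returns a retriable error while the operation keeps running in the background, so the application can resume it by calling the same API again. A minimal sketch for `init_transactions()`:

```c
#include <stdio.h>
#include <stdlib.h>
#include <librdkafka/rdkafka.h>

/* Sketch: resume init_transactions() after retriable/timeout errors. */
static void init_transactions_with_retry(rd_kafka_t *rk) {
        rd_kafka_error_t *error;

        while ((error = rd_kafka_init_transactions(rk, 30 * 1000))) {
                if (rd_kafka_error_is_retriable(error)) {
                        /* The operation continues in the background;
                         * calling the same API again resumes waiting. */
                        rd_kafka_error_destroy(error);
                        continue;
                }

                fprintf(stderr, "%% init_transactions() failed: %s\n",
                        rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);
                exit(1);
        }
}
```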
+ # librdkafka v1.9.2 librdkafka v1.9.2 is a maintenance release: diff -Nru librdkafka-1.9.2/.clang-format librdkafka-2.0.2/.clang-format --- librdkafka-1.9.2/.clang-format 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/.clang-format 2023-01-20 09:14:36.000000000 +0000 @@ -1,53 +1,136 @@ --- -BasedOnStyle: LLVM +Language: Cpp +AccessModifierOffset: -2 AlignAfterOpenBracket: Align -AlignConsecutiveMacros: 'true' -AlignConsecutiveAssignments: 'true' -AlignConsecutiveDeclarations: 'false' +AlignConsecutiveMacros: true +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right -AlignOperands: 'true' -AlignTrailingComments: 'true' -AllowAllArgumentsOnNextLine: 'true' -AllowAllConstructorInitializersOnNextLine: 'true' -AllowAllParametersOfDeclarationOnNextLine: 'false' -AllowShortBlocksOnASingleLine: 'false' -AllowShortCaseLabelsOnASingleLine: 'false' +AlignOperands: true +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: None +AllowShortLambdasOnASingleLine: All AllowShortIfStatementsOnASingleLine: Never -AllowShortLoopsOnASingleLine: 'false' +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None -AlwaysBreakBeforeMultilineStrings: 'true' -BinPackArguments: 'true' -BinPackParameters: 'false' +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: MultiLine +BinPackArguments: true +BinPackParameters: false +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None BreakBeforeBraces: Custom -BreakBeforeTernaryOperators: 'true' +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false BreakConstructorInitializers: AfterColon -BreakStringLiterals: 'true' -ColumnLimit: '80' -DerivePointerAlignment: 'false' -SortIncludes: 'false' -IncludeBlocks: Preserve -IndentCaseLabels: 'false' +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DeriveLineEnding: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + SortPriority: 0 + - Regex: '^(<|"(gtest|gmock|isl|json)/)' + Priority: 3 + SortPriority: 0 + - Regex: '.*' + Priority: 1 + SortPriority: 0 +IncludeIsMainRegex: '(Test)?$' +IncludeIsMainSourceRegex: '' +IndentCaseLabels: false +IndentGotoLabels: true IndentPPDirectives: None -IndentWidth: '8' -Language: Cpp -MaxEmptyLinesToKeep: '3' +IndentWidth: 8 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave 
+JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: true +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 3 +NamespaceIndentation: None +ObjCBinPackProtocolList: Auto +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Right -ReflowComments: 'true' -SpaceAfterCStyleCast: 'false' -SpaceAfterLogicalNot: 'false' -SpaceBeforeAssignmentOperators: 'true' -SpaceBeforeCpp11BracedList: 'true' +ReflowComments: true +SortIncludes: false +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: true +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true SpaceBeforeParens: ControlStatements -SpaceBeforeRangeBasedForLoopColon: 'true' -SpaceInEmptyParentheses: 'false' -SpacesBeforeTrailingComments: '2' -SpacesInAngles: 'false' -SpacesInCStyleCastParentheses: 'false' -SpacesInContainerLiterals: 'false' -SpacesInParentheses: 'false' -SpacesInSquareBrackets: 'false' -TabWidth: '8' -UseTab: Never - +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +Standard: Latest +StatementMacros: + - Q_UNUSED + - QT_REQUIRE_VERSION +TabWidth: 8 +UseCRLF: false +UseTab: Never ... 
+ diff -Nru librdkafka-1.9.2/.clang-format-cpp librdkafka-2.0.2/.clang-format-cpp --- librdkafka-1.9.2/.clang-format-cpp 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/.clang-format-cpp 2023-01-20 09:14:36.000000000 +0000 @@ -1,52 +1,103 @@ --- BasedOnStyle: Google -AlignConsecutiveMacros: 'true' -AlignConsecutiveAssignments: 'true' -AlignConsecutiveDeclarations: 'false' +Language: Cpp +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignConsecutiveMacros: true +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right -AlignOperands: 'true' -AlignTrailingComments: 'true' -AllowAllArgumentsOnNextLine: 'true' -AllowAllConstructorInitializersOnNextLine: 'true' -AllowAllParametersOfDeclarationOnNextLine: 'false' -AllowShortBlocksOnASingleLine: 'false' -AllowShortCaseLabelsOnASingleLine: 'false' +AlignOperands: true +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: None +AllowShortLambdasOnASingleLine: All AllowShortIfStatementsOnASingleLine: Never -AllowShortLoopsOnASingleLine: 'false' +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None -AlwaysBreakBeforeMultilineStrings: 'true' -BinPackArguments: 'true' -BinPackParameters: 'false' +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: true +BinPackParameters: false +BreakBeforeBinaryOperators: None BreakBeforeBraces: Custom -BreakBeforeTernaryOperators: 'true' +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false BreakConstructorInitializers: AfterColon -BreakStringLiterals: 'true' -ColumnLimit: '80' -DerivePointerAlignment: 'false' -SortIncludes: 'false' -IncludeBlocks: Preserve -IndentCaseLabels: 'false' +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DeriveLineEnding: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +IncludeBlocks: Preserve +IncludeIsMainRegex: '([-_](test|unittest))?$' +IncludeIsMainSourceRegex: '' +IndentCaseLabels: false +IndentGotoLabels: true IndentPPDirectives: None -IndentWidth: '2' -Language: Cpp -MaxEmptyLinesToKeep: '3' +IndentWidth: 2 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 3 +NamespaceIndentation: None +ObjCBinPackProtocolList: Never +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 PointerAlignment: Right -ReflowComments: 'true' -SpaceAfterCStyleCast: 'false' -SpaceAfterLogicalNot: 'false' -SpaceBeforeAssignmentOperators: 'true' -SpaceBeforeCpp11BracedList: 'true' 
+ReflowComments: true +SortIncludes: false +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: true +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true SpaceBeforeParens: ControlStatements -SpaceBeforeRangeBasedForLoopColon: 'true' -SpaceInEmptyParentheses: 'false' -SpacesBeforeTrailingComments: '2' -SpacesInAngles: 'false' -SpacesInCStyleCastParentheses: 'false' -SpacesInContainerLiterals: 'false' -SpacesInParentheses: 'false' -SpacesInSquareBrackets: 'false' -TabWidth: '8' -UseTab: Never - +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +Standard: Auto +TabWidth: 8 +UseCRLF: false +UseTab: Never ... + diff -Nru librdkafka-1.9.2/CONFIGURATION.md librdkafka-2.0.2/CONFIGURATION.md --- librdkafka-1.9.2/CONFIGURATION.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/CONFIGURATION.md 2023-01-20 09:14:36.000000000 +0000 @@ -50,6 +50,7 @@ connect_cb | * | | | low | Socket connect callback
*Type: see dedicated API* closesocket_cb | * | | | low | Socket close callback
*Type: see dedicated API* open_cb | * | | | low | File open callback to provide race-free CLOEXEC
*Type: see dedicated API* +resolve_cb | * | | | low | Address resolution callback (set with rd_kafka_conf_set_resolve_cb()).
*Type: see dedicated API* opaque | * | | | low | Application opaque (set with rd_kafka_conf_set_opaque())
*Type: see dedicated API* default_topic_conf | * | | | low | Default topic configuration for automatically subscribed topics
*Type: see dedicated API* internal.termination.signal | * | 0 .. 128 | 0 | low | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.
*Type: integer* @@ -57,6 +58,7 @@ api.version.request.timeout.ms | * | 1 .. 300000 | 10000 | low | Timeout for broker API version requests.
*Type: integer* api.version.fallback.ms | * | 0 .. 604800000 | 0 | medium | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).
*Type: integer* broker.version.fallback | * | | 0.10.0 | medium | Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.
*Type: string* +allow.auto.create.topics | * | true, false | false | low | Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies.
*Type: boolean* security.protocol | * | plaintext, ssl, sasl_plaintext, sasl_ssl | plaintext | high | Protocol used to communicate with brokers.
*Type: enum value* ssl.cipher.suites | * | | | low | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3).
*Type: string* ssl.curves.list | * | | | low | The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* @@ -75,11 +77,12 @@ ssl.crl.location | * | | | low | Path to CRL for verifying broker's certificate validity.
*Type: string* ssl.keystore.location | * | | | low | Path to client's keystore (PKCS#12) used for authentication.
*Type: string* ssl.keystore.password | * | | | low | Client's keystore (PKCS#12) password.
*Type: string* -ssl.engine.location | * | | | low | Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.
*Type: string* +ssl.providers | * | | | low | Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., "default,legacy".
*Type: string* +ssl.engine.location | * | | | low | **DEPRECATED** Path to OpenSSL engine library. OpenSSL >= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers.
*Type: string* ssl.engine.id | * | | dynamic | low | OpenSSL engine id is the name used for loading engine.
*Type: string* ssl_engine_callback_data | * | | | low | OpenSSL engine callback data (set with rd_kafka_conf_set_engine_callback_data()).
*Type: see dedicated API* enable.ssl.certificate.verification | * | true, false | true | low | Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.
*Type: boolean* -ssl.endpoint.identification.algorithm | * | none, https | none | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
*Type: enum value* +ssl.endpoint.identification.algorithm | * | none, https | https | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
*Type: enum value* ssl.certificate.verify_cb | * | | | low | Callback to verify the broker certificate chain.
*Type: see dedicated API* sasl.mechanisms | * | | GSSAPI | high | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* sasl.mechanism | * | | GSSAPI | high | Alias for `sasl.mechanisms`: SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* @@ -127,13 +130,12 @@ offset_commit_cb | C | | | low | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb())
*Type: see dedicated API* enable.partition.eof | C | true, false | false | low | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.
*Type: boolean* check.crcs | C | true, false | false | medium | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.
*Type: boolean* -allow.auto.create.topics | C | true, false | false | low | Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuraiton to take effect. Note: The default value (false) is different from the Java consumer (true). Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies.
*Type: boolean* client.rack | * | | | low | A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.
*Type: string* transactional.id | P | | | high | Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0.
*Type: string* transaction.timeout.ms | P | 1000 .. 2147483647 | 60000 | medium | The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods.
*Type: integer* enable.idempotence | P | true, false | false | high | When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantation will fail if user-supplied configuration is incompatible.
*Type: boolean* enable.gapless.guarantee | P | true, false | false | low | **EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`.
*Type: boolean* -queue.buffering.max.messages | P | 1 .. 10000000 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions.
*Type: integer* +queue.buffering.max.messages | P | 0 .. 2147483647 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. A value of 0 disables this limit.
*Type: integer* queue.buffering.max.kbytes | P | 1 .. 2147483647 | 1048576 | high | Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
*Type: integer* queue.buffering.max.ms | P | 0 .. 900000 | 5 | high | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* linger.ms | P | 0 .. 900000 | 5 | high | Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* diff -Nru librdkafka-1.9.2/CONTRIBUTING.md librdkafka-2.0.2/CONTRIBUTING.md --- librdkafka-1.9.2/CONTRIBUTING.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/CONTRIBUTING.md 2023-01-20 09:14:36.000000000 +0000 @@ -22,11 +22,89 @@ give credit but also to keep a trace back to who made what changes. Please always provide us with your full real name when contributing! -Official librdkafka project maintainer(s) assume ownership of all accepted -submissions. +Official librdkafka project maintainer(s) assume ownership and copyright +ownership of all accepted submissions. + ## Write a good patch +### API and ABI compatibility guarantees + +librdkafka maintains a strict API and ABI compatibility guarantee: we guarantee +not to break existing applications and we honour the SONAME version. + +**Note:** ABI compatibility is guaranteed only for the C library, not C++. + +**Note to librdkafka maintainers:** + +We don't think we can or should bump the SONAME version: it would break all +existing applications relying on librdkafka, and there's no change important +enough to warrant that. +Instead, deprecate (but keep) old APIs and add new, better APIs as required. +Deprecate APIs through documentation (`@deprecated ..`) rather than +compiler hints (`RD_DEPRECATED`) - since the latter will cause compilation +warnings/errors for users. + + +#### Changes to existing APIs + +Existing public APIs MUST NEVER be changed, as this would be a breaking API +and ABI change. This line must never be crossed. + +This means that no changes are allowed to: + * public function or method signatures - arguments, types, return values. + * public structs - existing fields may not be modified and new fields must + not be added. + + +As for semantic changes (i.e., a function changes its behaviour), these are +allowed under the following conditions: + + * the existing behaviour that is changed is not documented and not widely + relied upon. Typically this revolves around what error codes a function + returns. + * the existing behaviour is well known but is clearly wrong and consistently + trips people up. + +All such changes must be clearly stated in the "Upgrade considerations" section +of the release in CHANGELOG.md. + + +#### New public APIs + +Since changes to existing APIs are strictly limited to the above rules, it is +also clear that new APIs must be carefully designed to be complete and future +proof, since once they've been introduced they can never be changed. + + * Never add public structs - the public structs that exist in librdkafka + were all mistakes, and they've all been headaches. + Instead add private types and provide accessor methods to set/get values + (a hypothetical sketch of this pattern is shown a little further below). + This allows future extension without breaking existing applications. + * Avoid adding synchronous APIs; try to make them asynchronous by the use of + `rd_kafka_queue_t` result queues, if possible. + This may complicate the APIs a bit, but most of the time they're abstracted + in higher-level language clients and it allows both synchronous and + asynchronous usage. + + + +### Portability + +librdkafka is highly portable and needs to stay that way; this means we're +limited to almost-but-not-quite C99, and standard library (libc, et al.) +functions that are generally available across platforms. + +Also avoid adding new dependencies, since dependency availability across +platforms and package managers is a common problem.
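Referring back to the "New public APIs" rules above: the sketch below illustrates the private-type-plus-accessors pattern they prescribe. It is purely hypothetical - the `Widget` names are invented for illustration and are not part of librdkafka.

```c
#include <stdint.h>

/* Public header (hypothetical): only an opaque typedef and accessors are
 * exposed, so the layout can evolve without breaking API or ABI. */
typedef struct rd_kafka_Widget_s rd_kafka_Widget_t;

const char *rd_kafka_Widget_name(const rd_kafka_Widget_t *widget);
int32_t rd_kafka_Widget_id(const rd_kafka_Widget_t *widget);

/* Private implementation: fields live here, invisible to applications. */
struct rd_kafka_Widget_s {
        char *widget_name;
        int32_t widget_id;
};

const char *rd_kafka_Widget_name(const rd_kafka_Widget_t *widget) {
        return widget->widget_name;
}

int32_t rd_kafka_Widget_id(const rd_kafka_Widget_t *widget) {
        return widget->widget_id;
}
```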
+ +If an external dependency is required, make sure that it is available as a +vcpkg port, and also add it as a source build dependency to mklove +(see mklove/modules/configure.libcurl for an example) so that it can be built +and linked statically into librdkafka as part of the packaging process. + +Less is more. Don't try to be fancy, be boring. + + ### Follow code style When writing C code, follow the code style already established in @@ -36,7 +114,7 @@ clang-format is used to check, and fix, the style for C/C++ files, while flake8 and autopep8 is used for the Python scripts. -You should check the style before committing by running `make style-check-changed` +You must check the style before committing by running `make style-check-changed` from the top-level directory, and if any style errors are reported you can automatically fix them using `make style-fix-changed` (or just run that command directly). @@ -80,7 +158,7 @@ New features and APIs should also result in an added test case. Submitted patches must pass all existing tests. -For more information on the test suite see [tests/README.md] +For more information on the test suite see [tests/README.md]. @@ -167,7 +245,20 @@ -# librdkafka C style guide +# librdkafka C style and naming guide + +*Note: The code format style is enforced by our clang-format and pep8 rules, +so that is not covered here.* + +## C standard "C98" + +This is a mix of C89 and C99, to be compatible with old MSVC versions. + +Notably, it is C99 with the following limitations: + + * No variable declarations after statements. + * No in-line variable declarations. + ## Function and globals naming @@ -176,6 +267,12 @@ their subsystem (e.g., `cgrp`, `broker`, `buf`, etc..), followed by an action (e.g, `find`, `get`, `clear`, ..). +The exceptions are: + - Protocol requests and fields, which use their Apache Kafka CamelCase names, e.g., + `rd_kafka_ProduceRequest()` and `int16_t ErrorCode`. + - Public APIs that closely mimic the Apache Kafka Java counterpart, e.g., + the Admin API: `rd_kafka_DescribeConsumerGroups()`. + ## Variable naming @@ -186,6 +283,9 @@ * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker variable names should be named `rkb` +Be consistent with using the same variable name for the same type throughout +the code; it makes reading the code much easier, as the type can be easily +inferred from the variable name. For other types use reasonably concise but descriptive names. `i` and `j` are typical int iterators. @@ -193,15 +293,27 @@ ## Variable declaration Variables must be declared at the head of a scope, no in-line variable -declarations are allowed. +declarations after statements are allowed. + +## Function parameters/arguments + +For internal functions, assume that all function parameters are properly +specified; there is no need to check arguments for non-NULL, etc. +Any misuse internally is a bug, and not something we need to preemptively +protect against - the test suites should cover most of the code anyway - so +put your efforts there instead. + +For arguments that may be NULL, i.e., optional arguments, we explicitly +document in the function docstring that the argument is optional (NULL), +but there is no need to do this for non-optional arguments. ## Indenting -Use 8 spaces indent, same as the Linux kernel. +Use 8 spaces indent, no tabs, same as the Linux kernel. In emacs, use `c-set-style "linux`. For C++, use Google's C++ style. -Fix formatting issues by running `make style-fix` prior to committing.
+Fix formatting issues by running `make style-fix-changed` prior to committing. ## Comments @@ -296,7 +408,7 @@ ## Parentheses Don't assume the reader knows C operator precedence by heart for complex -statements, add parentheses to ease readability. +statements, add parentheses to ease readability and make the intent clear. ## ifdef hell diff -Nru librdkafka-1.9.2/debian/changelog librdkafka-2.0.2/debian/changelog --- librdkafka-1.9.2/debian/changelog 2023-01-12 02:13:14.000000000 +0000 +++ librdkafka-2.0.2/debian/changelog 2023-02-09 02:22:10.000000000 +0000 @@ -1,3 +1,10 @@ +librdkafka (2.0.2-1) unstable; urgency=medium + + * New upstream release. + - Update librdkafka1.symbols to add new (2.0.0) symbols. + + -- Faidon Liambotis Thu, 09 Feb 2023 04:22:10 +0200 + librdkafka (1.9.2-1) unstable; urgency=medium * New upstream release. diff -Nru librdkafka-1.9.2/debian/control librdkafka-2.0.2/debian/control --- librdkafka-1.9.2/debian/control 2023-01-12 02:13:14.000000000 +0000 +++ librdkafka-2.0.2/debian/control 2023-02-09 01:54:29.000000000 +0000 @@ -16,7 +16,7 @@ Standards-Version: 4.6.2 Rules-Requires-Root: no Section: libs -Homepage: https://github.com/edenhill/librdkafka +Homepage: https://github.com/confluentinc/librdkafka Vcs-Git: https://salsa.debian.org/kafka-team/librdkafka.git Vcs-Browser: https://salsa.debian.org/kafka-team/librdkafka diff -Nru librdkafka-1.9.2/debian/copyright librdkafka-2.0.2/debian/copyright --- librdkafka-1.9.2/debian/copyright 2023-01-12 02:13:14.000000000 +0000 +++ librdkafka-2.0.2/debian/copyright 2023-02-09 01:54:29.000000000 +0000 @@ -1,6 +1,6 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: librdkafka -Source: https://github.com/edenhill/librdkafka +Source: https://github.com/confluentinc/librdkafka Files: * Copyright: 2012-2022, Magnus Edenhill diff -Nru librdkafka-1.9.2/debian/librdkafka1.symbols librdkafka-2.0.2/debian/librdkafka1.symbols --- librdkafka-1.9.2/debian/librdkafka1.symbols 2023-01-12 02:13:14.000000000 +0000 +++ librdkafka-2.0.2/debian/librdkafka1.symbols 2023-02-09 02:19:57.000000000 +0000 @@ -17,12 +17,19 @@ rd_kafka_AdminOptions_destroy@Base 0.11.5 rd_kafka_AdminOptions_new@Base 0.11.5 rd_kafka_AdminOptions_set_broker@Base 0.11.5 + rd_kafka_AdminOptions_set_match_consumer_group_states@Base 2.0.0 rd_kafka_AdminOptions_set_opaque@Base 0.11.5 rd_kafka_AdminOptions_set_operation_timeout@Base 0.11.5 rd_kafka_AdminOptions_set_request_timeout@Base 0.11.5 + rd_kafka_AdminOptions_set_require_stable_offsets@Base 2.0.0 rd_kafka_AdminOptions_set_validate_only@Base 0.11.5 rd_kafka_AlterConfigs@Base 0.11.5 rd_kafka_AlterConfigs_result_resources@Base 0.11.5 + rd_kafka_AlterConsumerGroupOffsets@Base 2.0.0 + rd_kafka_AlterConsumerGroupOffsets_destroy@Base 2.0.0 + rd_kafka_AlterConsumerGroupOffsets_destroy_array@Base 2.0.0 + rd_kafka_AlterConsumerGroupOffsets_new@Base 2.0.0 + rd_kafka_AlterConsumerGroupOffsets_result_groups@Base 2.0.0 rd_kafka_ConfigEntry_is_default@Base 0.11.5 rd_kafka_ConfigEntry_is_read_only@Base 0.11.5 rd_kafka_ConfigEntry_is_sensitive@Base 0.11.5 @@ -41,6 +48,17 @@ rd_kafka_ConfigResource_set_config@Base 0.11.5 rd_kafka_ConfigResource_type@Base 0.11.5 rd_kafka_ConfigSource_name@Base 0.11.5 + rd_kafka_ConsumerGroupDescription_coordinator@Base 2.0.0 + rd_kafka_ConsumerGroupDescription_error@Base 2.0.0 + rd_kafka_ConsumerGroupDescription_group_id@Base 2.0.0 + rd_kafka_ConsumerGroupDescription_is_simple_consumer_group@Base 2.0.0 + rd_kafka_ConsumerGroupDescription_member@Base 2.0.0 + 
rd_kafka_ConsumerGroupDescription_member_count@Base 2.0.0 + rd_kafka_ConsumerGroupDescription_partition_assignor@Base 2.0.0 + rd_kafka_ConsumerGroupDescription_state@Base 2.0.0 + rd_kafka_ConsumerGroupListing_group_id@Base 2.0.0 + rd_kafka_ConsumerGroupListing_is_simple_consumer_group@Base 2.0.0 + rd_kafka_ConsumerGroupListing_state@Base 2.0.0 rd_kafka_CreateAcls@Base 1.9.0 rd_kafka_CreateAcls_result_acls@Base 1.9.0 rd_kafka_CreatePartitions@Base 0.11.5 @@ -75,6 +93,22 @@ rd_kafka_DescribeAcls_result_acls@Base 1.9.0 rd_kafka_DescribeConfigs@Base 0.11.5 rd_kafka_DescribeConfigs_result_resources@Base 0.11.5 + rd_kafka_DescribeConsumerGroups@Base 2.0.0 + rd_kafka_DescribeConsumerGroups_result_groups@Base 2.0.0 + rd_kafka_ListConsumerGroupOffsets@Base 2.0.0 + rd_kafka_ListConsumerGroupOffsets_destroy@Base 2.0.0 + rd_kafka_ListConsumerGroupOffsets_destroy_array@Base 2.0.0 + rd_kafka_ListConsumerGroupOffsets_new@Base 2.0.0 + rd_kafka_ListConsumerGroupOffsets_result_groups@Base 2.0.0 + rd_kafka_ListConsumerGroups@Base 2.0.0 + rd_kafka_ListConsumerGroups_result_errors@Base 2.0.0 + rd_kafka_ListConsumerGroups_result_valid@Base 2.0.0 + rd_kafka_MemberAssignment_partitions@Base 2.0.0 + rd_kafka_MemberDescription_assignment@Base 2.0.0 + rd_kafka_MemberDescription_client_id@Base 2.0.0 + rd_kafka_MemberDescription_consumer_id@Base 2.0.0 + rd_kafka_MemberDescription_group_instance_id@Base 2.0.0 + rd_kafka_MemberDescription_host@Base 2.0.0 rd_kafka_NewPartitions_destroy@Base 0.11.5 rd_kafka_NewPartitions_destroy_array@Base 0.11.5 rd_kafka_NewPartitions_new@Base 0.11.5 @@ -84,6 +118,9 @@ rd_kafka_NewTopic_new@Base 0.11.5 rd_kafka_NewTopic_set_config@Base 0.11.5 rd_kafka_NewTopic_set_replica_assignment@Base 0.11.5 + rd_kafka_Node_host@Base 2.0.0 + rd_kafka_Node_id@Base 2.0.0 + rd_kafka_Node_port@Base 2.0.0 rd_kafka_ResourcePatternType_name@Base 1.9.0 rd_kafka_ResourceType_name@Base 0.11.5 rd_kafka_abort_transaction@Base 1.4.2 @@ -131,6 +168,7 @@ rd_kafka_conf_set_opaque@Base 0.8.0 rd_kafka_conf_set_open_cb@Base 0.8.4 rd_kafka_conf_set_rebalance_cb@Base 0.9.0 + rd_kafka_conf_set_resolve_cb@Base 2.0.0 rd_kafka_conf_set_socket_cb@Base 0.8.4 rd_kafka_conf_set_ssl_cert@Base 1.2.1 rd_kafka_conf_set_ssl_cert_verify_cb@Base 1.2.1 @@ -154,6 +192,8 @@ rd_kafka_consumer_group_metadata_new_with_genid@Base 1.6.0 rd_kafka_consumer_group_metadata_read@Base 1.4.2 rd_kafka_consumer_group_metadata_write@Base 1.4.2 + rd_kafka_consumer_group_state_code@Base 2.0.0 + rd_kafka_consumer_group_state_name@Base 2.0.0 rd_kafka_consumer_poll@Base 0.9.0 rd_kafka_controllerid@Base 0.11.5 rd_kafka_default_topic_conf_dup@Base 0.11.4 @@ -173,6 +213,7 @@ rd_kafka_error_string@Base 1.4.2 rd_kafka_error_txn_requires_abort@Base 1.4.2 rd_kafka_event_AlterConfigs_result@Base 0.11.5 + rd_kafka_event_AlterConsumerGroupOffsets_result@Base 2.0.0 rd_kafka_event_CreateAcls_result@Base 1.9.0 rd_kafka_event_CreatePartitions_result@Base 0.11.5 rd_kafka_event_CreateTopics_result@Base 0.11.5 @@ -183,6 +224,9 @@ rd_kafka_event_DeleteTopics_result@Base 0.11.5 rd_kafka_event_DescribeAcls_result@Base 1.9.0 rd_kafka_event_DescribeConfigs_result@Base 0.11.5 + rd_kafka_event_DescribeConsumerGroups_result@Base 2.0.0 + rd_kafka_event_ListConsumerGroupOffsets_result@Base 2.0.0 + rd_kafka_event_ListConsumerGroups_result@Base 2.0.0 rd_kafka_event_config_string@Base 1.2.1 rd_kafka_event_debug_contexts@Base 1.5.0 rd_kafka_event_destroy@Base 0.9.2 @@ -222,6 +266,7 @@ rd_kafka_incremental_unassign@Base 1.6.0 rd_kafka_init_transactions@Base 1.4.2 
rd_kafka_interceptor_add_on_acknowledgement@Base 0.11.0 + rd_kafka_interceptor_add_on_broker_state_change@Base 2.0.0 rd_kafka_interceptor_add_on_commit@Base 0.11.0 rd_kafka_interceptor_add_on_consume@Base 0.11.0 rd_kafka_interceptor_add_on_destroy@Base 0.11.0 @@ -249,6 +294,7 @@ rd_kafka_message_timestamp@Base 0.9.1 rd_kafka_metadata@Base 0.8.4 rd_kafka_metadata_destroy@Base 0.8.4 + rd_kafka_mock_broker_error_stack_cnt@Base 2.0.0 rd_kafka_mock_broker_push_request_error_rtts@Base 1.8.0 rd_kafka_mock_broker_set_down@Base 1.4.2 rd_kafka_mock_broker_set_rack@Base 1.3.0 @@ -311,6 +357,7 @@ rd_kafka_rebalance_protocol@Base 1.6.0 rd_kafka_resume_partitions@Base 0.9.1 rd_kafka_sasl_background_callbacks_enable@Base 1.9.0 + rd_kafka_sasl_set_credentials@Base 2.0.0 rd_kafka_seek@Base 0.9.0 rd_kafka_seek_partitions@Base 1.6.0 rd_kafka_send_offsets_to_transaction@Base 1.4.2 diff -Nru librdkafka-1.9.2/debian/upstream/metadata librdkafka-2.0.2/debian/upstream/metadata --- librdkafka-1.9.2/debian/upstream/metadata 2023-01-12 00:26:43.000000000 +0000 +++ librdkafka-2.0.2/debian/upstream/metadata 2023-02-09 01:54:29.000000000 +0000 @@ -1,4 +1,4 @@ -Repository: https://github.com/edenhill/librdkafka.git -Repository-Browse: https://github.com/edenhill/librdkafka -Bug-Database: https://github.com/edenhill/librdkafka/issues -Bug-Submit: https://github.com/edenhill/librdkafka/issues/new +Repository: https://github.com/confluentinc/librdkafka.git +Repository-Browse: https://github.com/confluentinc/librdkafka +Bug-Database: https://github.com/confluentinc/librdkafka/issues +Bug-Submit: https://github.com/confluentinc/librdkafka/issues/new diff -Nru librdkafka-1.9.2/debian/watch librdkafka-2.0.2/debian/watch --- librdkafka-1.9.2/debian/watch 2023-01-12 00:26:43.000000000 +0000 +++ librdkafka-2.0.2/debian/watch 2023-02-09 01:56:45.000000000 +0000 @@ -1,2 +1,4 @@ version=4 -https://github.com/edenhill/librdkafka/tags .*/v?(\d[\d\.]*)\.tar\.gz +opts=uversionmangle=s/-(RC|rc|a|b|c)/~$1/,\ + filenamemangle=s/v?(@ANY_VERSION@@ARCHIVE_EXT@)/@PACKAGE@-$1/ \ +https://github.com/confluentinc/@PACKAGE@/tags .*/v?@ANY_VERSION@\.tar\.gz diff -Nru librdkafka-1.9.2/.doozer.json librdkafka-2.0.2/.doozer.json --- librdkafka-1.9.2/.doozer.json 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/.doozer.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -{ - "targets": { - "xenial-amd64": { - - "buildenv": "xenial-amd64", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -C tests build" - ], - "testcmd": [ - "make -C tests run_local_quick" - ], - }, - - "xenial-i386": { - "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works", - "buildenv": "xenial-i386", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "liblz4-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -C tests build" - ], - "testcmd": [ - "make -C tests run_local_quick" - ], - }, - - "xenial-armhf": { - - "buildenv": "xenial-armhf", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -j ${PARALLEL} -C tests build", - ], - "testcmd": [ - "cd tests", - "./run-test.sh -p1 -l", - "cd .." 
- ], - }, - - "stretch-mips": { - - "buildenv": "stretch-mips", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "libzstd-dev" - ], - "buildcmd": [ - "./configure", - "make -j ${PARALLEL}", - "make -j ${PARALLEL} -C tests build", - ], - "testcmd": [ - "cd tests", - "./run-test.sh -p1 -l", - "cd .." - ], - }, - - "cmake-xenial-amd64": { - - "buildenv": "xenial-amd64", - "builddeps": [ - "build-essential", - "python3", - "zlib1g-dev", - "libssl-dev", - "libsasl2-dev", - "cmake" - ], - "buildcmd": [ - "cmake -H. -B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug", - "cmake --build _builds", - ], - "testcmd": [ - "cd _builds", - "ctest -VV -R RdKafkaTestBrokerLess" - ], - } - }, - "artifacts": ["config.log", "Makefile.config", "config.h"] -} diff -Nru librdkafka-1.9.2/Doxyfile librdkafka-2.0.2/Doxyfile --- librdkafka-1.9.2/Doxyfile 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/Doxyfile 2023-01-20 09:14:36.000000000 +0000 @@ -1260,7 +1260,7 @@ HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). +# (YES) or that it should be included in the primary .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. diff -Nru librdkafka-1.9.2/examples/alter_consumer_group_offsets.c librdkafka-2.0.2/examples/alter_consumer_group_offsets.c --- librdkafka-1.9.2/examples/alter_consumer_group_offsets.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/examples/alter_consumer_group_offsets.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,338 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * AlterConsumerGroupOffsets usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. 
+ * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) { + + fprintf(stderr, + "Alter consumer group offsets usage examples\n" + "\n" + "Usage: %s \n" + " \n" + " \n" + " ...\n" + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions, + int print_offset, + const char *prefix) { + int i; + + if (partitions->cnt == 0) { + fprintf(fp, "%sNo partition found", prefix); + } + for (i = 0; i < partitions->cnt; i++) { + char offset_string[512] = {}; + *offset_string = '\0'; + if (print_offset) { + snprintf(offset_string, sizeof(offset_string), + " offset %" PRId64, + partitions->elems[i].offset); + } + fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s", + i > 0 ? "\n" : "", prefix, partitions->elems[i].topic, + partitions->elems[i].partition, offset_string, + rd_kafka_err2str(partitions->elems[i].err)); + } + fprintf(fp, "\n"); +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +static void +cmd_alter_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) { + char errstr[512]; /* librdkafka API error reporting buffer */ + rd_kafka_t *rk; /* Admin client instance */ + rd_kafka_AdminOptions_t *options; /* (Optional) Options for + * AlterConsumerGroupOffsets() */ + rd_kafka_event_t *event; /* AlterConsumerGroupOffsets result event */ + const int min_argc = 2; + int i, num_partitions = 0; + const char *group_id, *topic; + rd_kafka_AlterConsumerGroupOffsets_t *alter_consumer_group_offsets; + + /* + * Argument validation + */ + if (argc < min_argc || (argc - min_argc) % 2 != 0) { + usage("Wrong number of arguments"); + } + + num_partitions = (argc - min_argc) / 2; + group_id = argv[0]; + topic = argv[1]; + + /* + * Create an admin client, it can be created using any client type, + * so we choose producer since it requires no extra configuration + * and is more light-weight than the consumer. + * + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
+ */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) { + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } + + /* The Admin API is completely asynchronous, results are emitted + * on the result queue that is passed to AlterConsumerGroupOffsets() */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + /* Set timeout (optional) */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS); + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + exit(1); + } + + /* Read passed partition-offsets */ + rd_kafka_topic_partition_list_t *partitions = + rd_kafka_topic_partition_list_new(num_partitions); + for (i = 0; i < num_partitions; i++) { + rd_kafka_topic_partition_list_add( + partitions, topic, + parse_int("partition", argv[min_argc + i * 2])) + ->offset = parse_int("offset", argv[min_argc + 1 + i * 2]); + } + + /* Create argument */ + alter_consumer_group_offsets = + rd_kafka_AlterConsumerGroupOffsets_new(group_id, partitions); + /* Call AlterConsumerGroupOffsets */ + rd_kafka_AlterConsumerGroupOffsets(rk, &alter_consumer_group_offsets, 1, + options, queue); + + /* Clean up input arguments */ + rd_kafka_AlterConsumerGroupOffsets_destroy( + alter_consumer_group_offsets); + rd_kafka_AdminOptions_destroy(options); + rd_kafka_topic_partition_list_destroy(partitions); + + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (30s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + /* AlterConsumerGroupOffsets request failed */ + fprintf(stderr, "%% AlterConsumerGroupOffsets failed: %s\n", + rd_kafka_event_error_string(event)); + exit(1); + + } else { + /* AlterConsumerGroupOffsets request succeeded, but individual + * partitions may have errors. */ + const rd_kafka_AlterConsumerGroupOffsets_result_t *result; + const rd_kafka_group_result_t **groups; + size_t n_groups, i; + + result = rd_kafka_event_AlterConsumerGroupOffsets_result(event); + groups = rd_kafka_AlterConsumerGroupOffsets_result_groups( + result, &n_groups); + + printf("AlterConsumerGroupOffsets results:\n"); + for (i = 0; i < n_groups; i++) { + const rd_kafka_group_result_t *group = groups[i]; + const rd_kafka_topic_partition_list_t *partitions = + rd_kafka_group_result_partitions(group); + print_partition_list(stderr, partitions, 1, " "); + } + } + + /* Destroy event object when we're done with it. + * Note: rd_kafka_event_destroy() allows a NULL event. 
*/ + rd_kafka_event_destroy(event); + + /* Destroy queue */ + rd_kafka_queue_destroy(queue); + + /* Destroy the producer instance */ + rd_kafka_destroy(rk); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_alter_consumer_group_offsets(conf, argc - optind, &argv[optind]); + + return 0; +} diff -Nru librdkafka-1.9.2/examples/describe_consumer_groups.c librdkafka-2.0.2/examples/describe_consumer_groups.c --- librdkafka-1.9.2/examples/describe_consumer_groups.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/examples/describe_consumer_groups.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,373 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DescribeConsumerGroups usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) 
{ + + fprintf(stderr, + "Describe groups usage examples\n" + "\n" + "Usage: %s ...\n" + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions, + int print_offset, + const char *prefix) { + int i; + + if (partitions->cnt == 0) { + fprintf(fp, "%sNo partition found", prefix); + } + for (i = 0; i < partitions->cnt; i++) { + char offset_string[512] = {}; + *offset_string = '\0'; + if (print_offset) { + snprintf(offset_string, sizeof(offset_string), + " offset %" PRId64, + partitions->elems[i].offset); + } + fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s", + i > 0 ? "\n" : "", prefix, partitions->elems[i].topic, + partitions->elems[i].partition, offset_string, + rd_kafka_err2str(partitions->elems[i].err)); + } + fprintf(fp, "\n"); +} + +/** + * @brief Print group information. + */ +static int +print_groups_info(const rd_kafka_DescribeConsumerGroups_result_t *grpdesc, + int groups_cnt) { + size_t i; + const rd_kafka_ConsumerGroupDescription_t **result_groups; + size_t result_groups_cnt; + result_groups = rd_kafka_DescribeConsumerGroups_result_groups( + grpdesc, &result_groups_cnt); + + if (result_groups_cnt == 0) { + if (groups_cnt > 0) { + fprintf(stderr, "No matching groups found\n"); + return 1; + } else { + fprintf(stderr, "No groups in cluster\n"); + } + } + + for (i = 0; i < result_groups_cnt; i++) { + int j, member_cnt; + const rd_kafka_error_t *error; + const rd_kafka_ConsumerGroupDescription_t *group = + result_groups[i]; + char coordinator_desc[512]; + const rd_kafka_Node_t *coordinator = NULL; + const char *group_id = + rd_kafka_ConsumerGroupDescription_group_id(group); + const char *partition_assignor = + rd_kafka_ConsumerGroupDescription_partition_assignor(group); + rd_kafka_consumer_group_state_t state = + rd_kafka_ConsumerGroupDescription_state(group); + member_cnt = + rd_kafka_ConsumerGroupDescription_member_count(group); + error = rd_kafka_ConsumerGroupDescription_error(group); + coordinator = + rd_kafka_ConsumerGroupDescription_coordinator(group); + *coordinator_desc = '\0'; + + if (coordinator != NULL) { + snprintf(coordinator_desc, sizeof(coordinator_desc), + ", coordinator [id: %" PRId32 + ", host: %s" + ", port: %" PRIu16 "]", + rd_kafka_Node_id(coordinator), + rd_kafka_Node_host(coordinator), + rd_kafka_Node_port(coordinator)); + } + printf( + "Group \"%s\", partition assignor \"%s\", " + "state %s%s, with %" PRId32 " member(s)", + group_id, partition_assignor, + rd_kafka_consumer_group_state_name(state), coordinator_desc, + member_cnt); + if (error) + printf(" error[%" PRId32 "]: %s", + 
rd_kafka_error_code(error), + rd_kafka_error_string(error)); + printf("\n"); + for (j = 0; j < member_cnt; j++) { + const rd_kafka_MemberDescription_t *member = + rd_kafka_ConsumerGroupDescription_member(group, j); + printf( + " Member \"%s\" with client-id %s," + " group instance id: %s, host %s\n", + rd_kafka_MemberDescription_consumer_id(member), + rd_kafka_MemberDescription_client_id(member), + rd_kafka_MemberDescription_group_instance_id( + member), + rd_kafka_MemberDescription_host(member)); + const rd_kafka_MemberAssignment_t *assignment = + rd_kafka_MemberDescription_assignment(member); + const rd_kafka_topic_partition_list_t + *topic_partitions = + rd_kafka_MemberAssignment_partitions( + assignment); + if (!topic_partitions) { + printf(" No assignment\n"); + } else if (topic_partitions->cnt == 0) { + printf(" Empty assignment\n"); + } else { + printf(" Assignment:\n"); + print_partition_list(stdout, topic_partitions, + 0, " "); + } + } + } + return 0; +} + +/** + * @brief Call rd_kafka_DescribeConsumerGroups() with a list of + * groups. + */ +static void +cmd_describe_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk; + const char **groups = NULL; + char errstr[512]; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *event = NULL; + int retval = 0; + int groups_cnt = 0; + + if (argc >= 1) { + groups = (const char **)&argv[0]; + groups_cnt = argc; + } + + /* + * Create consumer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. + */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new consumer: %s", errstr); + + /* + * Describe consumer groups + */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + goto exit; + } + + rd_kafka_DescribeConsumerGroups(rk, groups, groups_cnt, options, queue); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* DescribeConsumerGroups request failed */ + fprintf(stderr, + "%% DescribeConsumerGroups failed[%" PRId32 "]: %s\n", + err, rd_kafka_event_error_string(event)); + goto exit; + + } else { + /* DescribeConsumerGroups request succeeded, but individual + * groups may have errors. 
*/ + const rd_kafka_DescribeConsumerGroups_result_t *result; + + result = rd_kafka_event_DescribeConsumerGroups_result(event); + printf("DescribeConsumerGroups results:\n"); + retval = print_groups_info(result, groups_cnt); + } + + +exit: + if (event) + rd_kafka_event_destroy(event); + rd_kafka_AdminOptions_destroy(options); + rd_kafka_queue_destroy(queue); + /* Destroy the client instance */ + rd_kafka_destroy(rk); + + exit(retval); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_describe_consumer_groups(conf, argc - optind, &argv[optind]); + + return 0; +} diff -Nru librdkafka-1.9.2/examples/.gitignore librdkafka-2.0.2/examples/.gitignore --- librdkafka-1.9.2/examples/.gitignore 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/examples/.gitignore 2023-01-20 09:14:36.000000000 +0000 @@ -1,13 +1,19 @@ -rdkafka_example -rdkafka_performance -rdkafka_example_cpp -rdkafka_complex_consumer_example -rdkafka_complex_consumer_example_cpp +consumer +delete_records +idempotent_producer kafkatest_verifiable_client +misc +openssl_engine_example_cpp producer producer_cpp -consumer -idempotent_producer +rdkafka_complex_consumer_example +rdkafka_complex_consumer_example_cpp rdkafka_consume_batch +rdkafka_example +rdkafka_example_cpp +rdkafka_performance transactions -delete_records +list_consumer_groups +describe_consumer_groups +list_consumer_group_offsets +alter_consumer_group_offsets diff -Nru librdkafka-1.9.2/examples/idempotent_producer.c librdkafka-2.0.2/examples/idempotent_producer.c --- librdkafka-1.9.2/examples/idempotent_producer.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/examples/idempotent_producer.c 2023-01-20 09:14:36.000000000 +0000 @@ -275,7 +275,8 @@ * * The internal queue is limited by the * configuration property - * queue.buffering.max.messages */ + * queue.buffering.max.messages and + * queue.buffering.max.kbytes */ rd_kafka_poll(rk, 1000 /*block for max 1000ms*/); goto retry; diff -Nru librdkafka-1.9.2/examples/list_consumer_group_offsets.c librdkafka-2.0.2/examples/list_consumer_group_offsets.c --- librdkafka-1.9.2/examples/list_consumer_group_offsets.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/examples/list_consumer_group_offsets.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,359 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * ListConsumerGroupOffsets usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) { + + fprintf(stderr, + "List consumer group offsets usage examples\n" + "\n" + "Usage: %s " + "\n" + " \n" + " \n" + " ...\n" + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + + +static void +print_partition_list(FILE *fp, + const rd_kafka_topic_partition_list_t *partitions, + int print_offset, + const char *prefix) { + int i; + + if (partitions->cnt == 0) { + fprintf(fp, "%sNo partition found", prefix); + } + for (i = 0; i < partitions->cnt; i++) { + char offset_string[512] = {}; + *offset_string = '\0'; + if (print_offset) { + snprintf(offset_string, sizeof(offset_string), + " offset %" PRId64, + partitions->elems[i].offset); + } + fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s", + i > 0 ? 
"\n" : "", prefix, partitions->elems[i].topic, + partitions->elems[i].partition, offset_string, + rd_kafka_err2str(partitions->elems[i].err)); + } + fprintf(fp, "\n"); +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +static void +cmd_list_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) { + char errstr[512]; /* librdkafka API error reporting buffer */ + rd_kafka_t *rk; /* Admin client instance */ + rd_kafka_AdminOptions_t *options; /* (Optional) Options for + * ListConsumerGroupOffsets() */ + rd_kafka_event_t *event; /* ListConsumerGroupOffsets result event */ + const int min_argc = 2; + char *topic; + int partition; + int require_stable_offsets = 0, num_partitions = 0; + rd_kafka_ListConsumerGroupOffsets_t *list_cgrp_offsets; + rd_kafka_error_t *error; + const char *group; + + /* + * Argument validation + */ + if (argc < min_argc || (argc - min_argc) % 2 != 0) + usage("Wrong number of arguments"); + else { + require_stable_offsets = + parse_int("require_stable_offsets", argv[1]); + if (require_stable_offsets < 0 || require_stable_offsets > 1) + usage("Require stable not a 0-1 int"); + } + + num_partitions = (argc - min_argc) / 2; + group = argv[0]; + + /* + * Create an admin client, it can be created using any client type, + * so we choose producer since it requires no extra configuration + * and is more light-weight than the consumer. + * + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
+ */ + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!rk) { + fprintf(stderr, "%% Failed to create new producer: %s\n", + errstr); + exit(1); + } + + /* The Admin API is completely asynchronous, results are emitted + * on the result queue that is passed to ListConsumerGroupOffsets() */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + /* Set timeout (optional) */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); + if (rd_kafka_AdminOptions_set_request_timeout( + options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + exit(1); + } + /* Set requested require stable offsets */ + if ((error = rd_kafka_AdminOptions_set_require_stable_offsets( + options, require_stable_offsets))) { + fprintf(stderr, "%% Failed to set require stable offsets: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + exit(1); + } + + /* Read passed partition-offsets */ + rd_kafka_topic_partition_list_t *partitions = NULL; + if (num_partitions > 0) { + int i; + partitions = rd_kafka_topic_partition_list_new(num_partitions); + for (i = 0; i < num_partitions; i++) { + topic = argv[min_argc + i * 2]; + partition = + parse_int("partition", argv[min_argc + i * 2 + 1]); + rd_kafka_topic_partition_list_add(partitions, topic, + partition); + } + } + + /* Create argument */ + list_cgrp_offsets = + rd_kafka_ListConsumerGroupOffsets_new(group, partitions); + /* Call ListConsumerGroupOffsets */ + rd_kafka_ListConsumerGroupOffsets(rk, &list_cgrp_offsets, 1, options, + queue); + + /* Clean up input arguments */ + rd_kafka_ListConsumerGroupOffsets_destroy(list_cgrp_offsets); + rd_kafka_AdminOptions_destroy(options); + + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (30s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + /* ListConsumerGroupOffsets request failed */ + fprintf(stderr, "%% ListConsumerGroupOffsets failed: %s\n", + rd_kafka_event_error_string(event)); + exit(1); + + } else { + /* ListConsumerGroupOffsets request succeeded, but individual + * partitions may have errors. */ + const rd_kafka_ListConsumerGroupOffsets_result_t *result; + const rd_kafka_group_result_t **groups; + size_t n_groups, i; + + result = rd_kafka_event_ListConsumerGroupOffsets_result(event); + groups = rd_kafka_ListConsumerGroupOffsets_result_groups( + result, &n_groups); + + printf("ListConsumerGroupOffsets results:\n"); + for (i = 0; i < n_groups; i++) { + const rd_kafka_group_result_t *group = groups[i]; + const rd_kafka_topic_partition_list_t *partitions = + rd_kafka_group_result_partitions(group); + print_partition_list(stderr, partitions, 1, " "); + } + } + + if (partitions) + rd_kafka_topic_partition_list_destroy(partitions); + + /* Destroy event object when we're done with it. + * Note: rd_kafka_event_destroy() allows a NULL event. 
*/ + rd_kafka_event_destroy(event); + + /* Destroy queue */ + rd_kafka_queue_destroy(queue); + + /* Destroy the producer instance */ + rd_kafka_destroy(rk); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_list_consumer_group_offsets(conf, argc - optind, &argv[optind]); + + return 0; +} diff -Nru librdkafka-1.9.2/examples/list_consumer_groups.c librdkafka-2.0.2/examples/list_consumer_groups.c --- librdkafka-1.9.2/examples/list_consumer_groups.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/examples/list_consumer_groups.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,330 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * ListConsumerGroups usage example. + */ + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include "../win32/wingetopt.h" +#else +#include +#endif + + +/* Typical include path would be , but this program + * is builtin from within the librdkafka source tree and thus differs. */ +#include "rdkafka.h" + + +const char *argv0; + +static rd_kafka_queue_t *queue; /** Admin result queue. + * This is a global so we can + * yield in stop() */ +static volatile sig_atomic_t run = 1; + +/** + * @brief Signal termination of program + */ +static void stop(int sig) { + if (!run) { + fprintf(stderr, "%% Forced termination\n"); + exit(2); + } + run = 0; + rd_kafka_queue_yield(queue); +} + + +static void usage(const char *reason, ...) 
{ + + fprintf(stderr, + "List groups usage examples\n" + "\n" + "Usage: %s ...\n" + "\n" + "Options:\n" + " -b Bootstrap server list to connect to.\n" + " -X Set librdkafka configuration property.\n" + " See CONFIGURATION.md for full list.\n" + " -d Enable librdkafka debugging (%s).\n" + "\n", + argv0, rd_kafka_get_debug_contexts()); + + if (reason) { + va_list ap; + char reasonbuf[512]; + + va_start(ap, reason); + vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap); + va_end(ap); + + fprintf(stderr, "ERROR: %s\n", reasonbuf); + } + + exit(reason ? 1 : 0); +} + + +#define fatal(...) \ + do { \ + fprintf(stderr, "ERROR: "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(2); \ + } while (0) + + +/** + * @brief Set config property. Exit on failure. + */ +static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) { + char errstr[512]; + + if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) + fatal("Failed to set %s=%s: %s", name, val, errstr); +} + +/** + * @brief Print group information. + */ +static int print_groups_info(const rd_kafka_ListConsumerGroups_result_t *list) { + size_t i; + const rd_kafka_ConsumerGroupListing_t **result_groups; + const rd_kafka_error_t **errors; + size_t result_groups_cnt; + size_t result_error_cnt; + result_groups = + rd_kafka_ListConsumerGroups_result_valid(list, &result_groups_cnt); + errors = + rd_kafka_ListConsumerGroups_result_errors(list, &result_error_cnt); + + if (result_groups_cnt == 0) { + fprintf(stderr, "No matching groups found\n"); + } + + for (i = 0; i < result_groups_cnt; i++) { + const rd_kafka_ConsumerGroupListing_t *group = result_groups[i]; + const char *group_id = + rd_kafka_ConsumerGroupListing_group_id(group); + rd_kafka_consumer_group_state_t state = + rd_kafka_ConsumerGroupListing_state(group); + int is_simple_consumer_group = + rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + group); + + printf("Group \"%s\", is simple %" PRId32 + ", " + "state %s", + group_id, is_simple_consumer_group, + rd_kafka_consumer_group_state_name(state)); + printf("\n"); + } + for (i = 0; i < result_error_cnt; i++) { + const rd_kafka_error_t *error = errors[i]; + printf("Error[%" PRId32 "]: %s\n", rd_kafka_error_code(error), + rd_kafka_error_string(error)); + } + return 0; +} + +/** + * @brief Parse an integer or fail. + */ +int64_t parse_int(const char *what, const char *str) { + char *end; + unsigned long n = strtoull(str, &end, 0); + + if (end != str + strlen(str)) { + fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n", + what, str); + exit(1); + } + + return (int64_t)n; +} + +/** + * @brief Call rd_kafka_ListConsumerGroups() with a list of + * groups. + */ +static void +cmd_list_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) { + rd_kafka_t *rk; + const char **states_str = NULL; + char errstr[512]; + rd_kafka_AdminOptions_t *options; + rd_kafka_event_t *event = NULL; + rd_kafka_error_t *error = NULL; + int i; + int retval = 0; + int states_cnt = 0; + rd_kafka_consumer_group_state_t *states; + + + if (argc >= 1) { + states_str = (const char **)&argv[0]; + states_cnt = argc; + } + states = calloc(states_cnt, sizeof(rd_kafka_consumer_group_state_t)); + for (i = 0; i < states_cnt; i++) { + states[i] = parse_int("state code", states_str[i]); + } + + /* + * Create consumer instance + * NOTE: rd_kafka_new() takes ownership of the conf object + * and the application must not reference it again after + * this call. 
+ */ + rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr)); + if (!rk) + fatal("Failed to create new consumer: %s", errstr); + + /* + * List consumer groups + */ + queue = rd_kafka_queue_new(rk); + + /* Signal handler for clean shutdown */ + signal(SIGINT, stop); + + options = + rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + + if (rd_kafka_AdminOptions_set_request_timeout( + options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) { + fprintf(stderr, "%% Failed to set timeout: %s\n", errstr); + goto exit; + } + + if ((error = rd_kafka_AdminOptions_set_match_consumer_group_states( + options, states, states_cnt))) { + fprintf(stderr, "%% Failed to set states: %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + goto exit; + } + free(states); + + rd_kafka_ListConsumerGroups(rk, options, queue); + rd_kafka_AdminOptions_destroy(options); + + /* Wait for results */ + event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by + * the request timeout set + * above (10s) */); + + if (!event) { + /* User hit Ctrl-C, + * see yield call in stop() signal handler */ + fprintf(stderr, "%% Cancelled by user\n"); + + } else if (rd_kafka_event_error(event)) { + rd_kafka_resp_err_t err = rd_kafka_event_error(event); + /* ListConsumerGroups request failed */ + fprintf(stderr, + "%% ListConsumerGroups failed[%" PRId32 "]: %s\n", err, + rd_kafka_event_error_string(event)); + goto exit; + + } else { + /* ListConsumerGroups request succeeded, but individual + * groups may have errors. */ + const rd_kafka_ListConsumerGroups_result_t *result; + + result = rd_kafka_event_ListConsumerGroups_result(event); + printf("ListConsumerGroups results:\n"); + retval = print_groups_info(result); + } + + +exit: + if (event) + rd_kafka_event_destroy(event); + rd_kafka_queue_destroy(queue); + /* Destroy the client instance */ + rd_kafka_destroy(rk); + + exit(retval); +} + +int main(int argc, char **argv) { + rd_kafka_conf_t *conf; /**< Client configuration object */ + int opt; + argv0 = argv[0]; + + /* + * Create Kafka client configuration place-holder + */ + conf = rd_kafka_conf_new(); + + + /* + * Parse common options + */ + while ((opt = getopt(argc, argv, "b:X:d:")) != -1) { + switch (opt) { + case 'b': + conf_set(conf, "bootstrap.servers", optarg); + break; + + case 'X': { + char *name = optarg, *val; + + if (!(val = strchr(name, '='))) + fatal("-X expects a name=value argument"); + + *val = '\0'; + val++; + + conf_set(conf, name, val); + break; + } + + case 'd': + conf_set(conf, "debug", optarg); + break; + + default: + usage("Unknown option %c", (char)opt); + } + } + + cmd_list_consumer_groups(conf, argc - optind, &argv[optind]); + + return 0; +} diff -Nru librdkafka-1.9.2/examples/Makefile librdkafka-2.0.2/examples/Makefile --- librdkafka-1.9.2/examples/Makefile 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/examples/Makefile 2023-01-20 09:14:36.000000000 +0000 @@ -4,6 +4,10 @@ producer consumer idempotent_producer transactions \ delete_records \ openssl_engine_example_cpp \ + list_consumer_groups \ + describe_consumer_groups \ + list_consumer_group_offsets \ + alter_consumer_group_offsets \ misc all: $(EXAMPLES) @@ -60,6 +64,22 @@ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ ../src/librdkafka.a $(LIBS) +list_consumer_groups: ../src/librdkafka.a list_consumer_groups.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +describe_consumer_groups: ../src/librdkafka.a describe_consumer_groups.c + $(CC) 
$(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +list_consumer_group_offsets: ../src/librdkafka.a list_consumer_group_offsets.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + +alter_consumer_group_offsets: ../src/librdkafka.a alter_consumer_group_offsets.c + $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \ + ../src/librdkafka.a $(LIBS) + rdkafka_complex_consumer_example: ../src/librdkafka.a rdkafka_complex_consumer_example.c $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_complex_consumer_example.c -o $@ $(LDFLAGS) \ ../src/librdkafka.a $(LIBS) diff -Nru librdkafka-1.9.2/examples/misc.c librdkafka-2.0.2/examples/misc.c --- librdkafka-1.9.2/examples/misc.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/examples/misc.c 2023-01-20 09:14:36.000000000 +0000 @@ -174,7 +174,7 @@ /* * Print group information */ - for (i = 0; grplist->group_cnt; i++) { + for (i = 0; i < grplist->group_cnt; i++) { int j; const struct rd_kafka_group_info *grp = &grplist->groups[i]; diff -Nru librdkafka-1.9.2/examples/producer.c librdkafka-2.0.2/examples/producer.c --- librdkafka-1.9.2/examples/producer.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/examples/producer.c 2023-01-20 09:14:36.000000000 +0000 @@ -203,7 +203,8 @@ * * The internal queue is limited by the * configuration property - * queue.buffering.max.messages */ + * queue.buffering.max.messages and + * queue.buffering.max.kbytes */ rd_kafka_poll(rk, 1000 /*block for max 1000ms*/); goto retry; diff -Nru librdkafka-1.9.2/examples/producer.cpp librdkafka-2.0.2/examples/producer.cpp --- librdkafka-1.9.2/examples/producer.cpp 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/examples/producer.cpp 2023-01-20 09:14:36.000000000 +0000 @@ -189,7 +189,7 @@ * * The internal queue is limited by the * configuration property - * queue.buffering.max.messages */ + * queue.buffering.max.messages and queue.buffering.max.kbytes */ producer->poll(1000 /*block for max 1000ms*/); goto retry; } diff -Nru librdkafka-1.9.2/examples/README.md librdkafka-2.0.2/examples/README.md --- librdkafka-1.9.2/examples/README.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/examples/README.md 2023-01-20 09:14:36.000000000 +0000 @@ -28,3 +28,11 @@ * [rdkafka_performance.c](rdkafka_performance.c) - performance, benchmark, latency producer and consumer tool. * [kafkatest_verifiable_client.cpp](kafkatest_verifiable_client.cpp) - for use with the official Apache Kafka client system tests. * [openssl_engine_example.cpp](openssl_engine_example.cpp) - metadata listing in C++ over SSL channel established using OpenSSL engine. + + + For Admin API examples see: + * [delete_records.c](delete_records.c) - Delete records. + * [list_consumer_groups.c](list_consumer_groups.c) - List consumer groups. + * [describe_consumer_groups.c](describe_consumer_groups.c) - Describe consumer groups. + * [list_consumer_group_offsets.c](list_consumer_group_offsets.c) - List offsets of a consumer group. + * [alter_consumer_group_offsets.c](alter_consumer_group_offsets.c) - Alter offsets of a consumer group. 
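All four Admin API examples added in this release (list_consumer_groups, describe_consumer_groups, list_consumer_group_offsets, alter_consumer_group_offsets) share one skeleton: create a client, create a result queue, build AdminOptions with a request timeout, issue the request, then block on the queue for the result event. The sketch below is an editorial condensation of that shared flow using ListConsumerGroups; it is not code from the diff, the function name `list_groups` is illustrative, and error handling is trimmed for brevity.

```c
/* Condensed sketch (not part of the release diff) of the flow shared by the
 * new Admin API examples: producer-based admin client, result queue,
 * AdminOptions with a request timeout, one Admin call, blocking poll. */
#include <stdio.h>
#include "rdkafka.h"

static int list_groups(const char *brokers) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                return -1;
        }

        /* Any client type can serve as an admin client; the examples use a
         * producer since it needs no extra configuration. */
        rd_kafka_t *rk =
            rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                return -1;

        rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
        rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
            rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
        rd_kafka_AdminOptions_set_request_timeout(options, 10 * 1000 /* 10s */,
                                                  errstr, sizeof(errstr));

        rd_kafka_ListConsumerGroups(rk, options, queue);
        rd_kafka_AdminOptions_destroy(options);

        /* Block until the result event arrives (bounded by the timeout). */
        rd_kafka_event_t *event = rd_kafka_queue_poll(queue, -1);
        if (event && !rd_kafka_event_error(event)) {
                const rd_kafka_ListConsumerGroups_result_t *result =
                    rd_kafka_event_ListConsumerGroups_result(event);
                size_t cnt, i;
                const rd_kafka_ConsumerGroupListing_t **groups =
                    rd_kafka_ListConsumerGroups_result_valid(result, &cnt);
                for (i = 0; i < cnt; i++)
                        printf("Group: %s\n",
                               rd_kafka_ConsumerGroupListing_group_id(
                                   groups[i]));
        } else if (event) {
                fprintf(stderr, "ListConsumerGroups failed: %s\n",
                        rd_kafka_event_error_string(event));
        }

        rd_kafka_event_destroy(event); /* NULL event is allowed */
        rd_kafka_queue_destroy(queue);
        rd_kafka_destroy(rk);
        return 0;
}
```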
diff -Nru librdkafka-1.9.2/.github/workflows/base.yml librdkafka-2.0.2/.github/workflows/base.yml --- librdkafka-1.9.2/.github/workflows/base.yml 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/.github/workflows/base.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -name: check -on: [push, pull_request] -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - run: | - sudo apt update - sudo apt install -y python3 python3-pip python3-setuptools libcurl4-openssl-dev libssl-dev libsasl2-dev - python3 -m pip install -r tests/requirements.txt - - run: | - ./configure --CFLAGS="-std=c99" --CXXFLAGS="-std=c++98" --install-deps --enable-devel --disable-lz4-ext --prefix="$PWD/dest" - - run: | - make -j - make -C tests -j build - - run: | - examples/rdkafka_example -V || true - examples/rdkafka_example -X builtin.features - - run: | - make -C tests run_local_quick - - style: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - run: | - sudo apt update - sudo apt install -y python3 python3-pip python3-setuptools clang-format - python3 -m pip install -r packaging/tools/requirements.txt - - name: Style checker - run: make style-check diff -Nru librdkafka-1.9.2/INTRODUCTION.md librdkafka-2.0.2/INTRODUCTION.md --- librdkafka-1.9.2/INTRODUCTION.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/INTRODUCTION.md 2023-01-20 09:14:36.000000000 +0000 @@ -653,7 +653,7 @@ ##### RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID Returned by broker when the PID+Epoch is unknown, which may occur when -the PID's state has expired (due to topic retention, DeleteRercords, +the PID's state has expired (due to topic retention, DeleteRecords, or compaction). The Java producer added quite a bit of error handling for this case, @@ -1345,8 +1345,9 @@ `rd_kafka_produce()` is a non-blocking API, it will enqueue the message on an internal queue and return immediately. -If the number of queued messages would exceed the `queue.buffering.max.messages` -configuration property then `rd_kafka_produce()` returns -1 and sets errno +If the new message would cause the internal queue to exceed +`queue.buffering.max.messages` or `queue.buffering.max.kbytes` +configuration properties, `rd_kafka_produce()` returns -1 and sets errno to `ENOBUFS` and last_error to `RD_KAFKA_RESP_ERR__QUEUE_FULL`, thus providing a backpressure mechanism. 
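The INTRODUCTION.md change above documents that a produce call now fails with `RD_KAFKA_RESP_ERR__QUEUE_FULL` when either `queue.buffering.max.messages` or `queue.buffering.max.kbytes` would be exceeded. The fragment below is a hedged illustration of the poll-and-retry pattern the bundled producer examples use for that case; it is not part of the diff, and the helper name and its parameters are placeholders.

```c
#include <stdio.h>
#include "rdkafka.h"

/* Illustration only (not from the diff): produce with backpressure handling,
 * mirroring the retry loop in the producer examples touched above. */
static void produce_with_backpressure(rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                                      void *payload, size_t len) {
retry:
        if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
                             payload, len, NULL, 0, NULL) == -1) {
                if (rd_kafka_last_error() ==
                    RD_KAFKA_RESP_ERR__QUEUE_FULL) {
                        /* The message would exceed
                         * queue.buffering.max.messages or
                         * queue.buffering.max.kbytes: serve delivery
                         * reports, which frees queue space, then retry. */
                        rd_kafka_poll(rk, 1000 /* block for max 1000ms */);
                        goto retry;
                }
                /* Other errors are treated as permanent for this message. */
                fprintf(stderr, "%% Failed to produce: %s\n",
                        rd_kafka_err2str(rd_kafka_last_error()));
        }
}
```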
@@ -1867,14 +1868,14 @@ | KIP-124 - Request rate quotas | 0.11.0.0 | Partially supported (depending on protocol request) | | KIP-126 - Producer ensure proper batch size after compression | 0.11.0.0 | Supported | | KIP-133 - AdminAPI: DescribeConfigs and AlterConfigs | 0.11.0.0 | Supported | -| KIP-140 - AdminAPI: ACLs | 0.11.0.0 | Not supported | +| KIP-140 - AdminAPI: ACLs | 0.11.0.0 | Supported | | KIP-144 - Broker reconnect backoff | 0.11.0.0 | Supported | | KIP-152 - Improved SASL auth error messages | 1.0.0 | Supported | | KIP-192 - Cleaner idempotence semantics | 1.0.0 | Not supported (superceeded by KIP-360) | | KIP-195 - AdminAPI: CreatePartitions | 1.0.0 | Supported | | KIP-204 - AdminAPI: DeleteRecords | 1.1.0 | Supported | | KIP-219 - Client-side throttling | 2.0.0 | Not supported | -| KIP-222 - AdminAPI: Consumer group operations | 2.0.0 | Not supported (but some APIs available outside Admin client) | +| KIP-222 - AdminAPI: Consumer group operations | 2.0.0 | Supported | | KIP-223 - Consumer partition lead metric | 2.0.0 | Not supported | | KIP-226 - AdminAPI: Dynamic broker config | 1.1.0 | Supported | | KIP-227 - Consumer Incremental Fetch | 1.1.0 | Not supported | @@ -1901,7 +1902,7 @@ | KIP-389 - Consumer group max size | 2.2.0 | Supported (error is propagated to application, but the consumer does not raise a fatal error) | | KIP-392 - Allow consumers to fetch from closest replica | 2.4.0 | Supported | | KIP-394 - Consumer: require member.id in JoinGroupRequest | 2.2.0 | Supported | -| KIP-396 - AdminAPI: commit/list offsets | 2.4.0 | Not supported (but some APIs available outside Admin client) | +| KIP-396 - AdminAPI: commit/list offsets | 2.4.0 | Partially supported (remaining APIs available outside Admin client) | | KIP-412 - AdminAPI: adjust log levels | 2.4.0 | Not supported | | KIP-421 - Variables in client config files | 2.3.0 | Not applicable (librdkafka, et.al, does not provide a config file interface, and shouldn't) | | KIP-429 - Consumer: incremental rebalance protocol | 2.4.0 | Supported | @@ -1918,7 +1919,7 @@ | KIP-511 - Collect Client's Name and Version | 2.4.0 | Supported | | KIP-514 - Bounded flush() | 2.4.0 | Supported | | KIP-517 - Consumer poll() metrics | 2.4.0 | Not supported | -| KIP-518 - Allow listing consumer groups per state | 2.6.0 | Not supported | +| KIP-518 - Allow listing consumer groups per state | 2.6.0 | Supported | | KIP-519 - Make SSL engine configurable | 2.6.0 | Supported | | KIP-525 - Return topic metadata and configs in CreateTopics response | 2.4.0 | Not supported | | KIP-526 - Reduce Producer Metadata Lookups for Large Number of Topics | 2.5.0 | Not supported | @@ -1942,40 +1943,40 @@ ### Supported protocol versions -"Kafka max" is the maximum ApiVersion supported in Apache Kafka 2.4.0, while +"Kafka max" is the maximum ApiVersion supported in Apache Kafka 3.3.1, while "librdkafka max" is the maximum ApiVersion supported in the latest release of librdkafka. 
| ApiKey | Request name | Kafka max | librdkafka max | | ------- | ------------------- | ----------- | ----------------------- | -| 0 | Produce | 7 | 7 | -| 1 | Fetch | 11 | 11 | -| 2 | ListOffsets | 5 | 1 | -| 3 | Metadata | 8 | 2 | -| 8 | OffsetCommit | 7 | 7 | -| 9 | OffsetFetch | 5 | 1 | -| 10 | FindCoordinator | 2 | 2 | -| 11 | JoinGroup | 5 | 5 | -| 12 | Heartbeat | 3 | 3 | -| 13 | LeaveGroup | 3 | 1 | -| 14 | SyncGroup | 3 | 3 | -| 15 | DescribeGroups | 4 | 0 | -| 16 | ListGroups | 2 | 0 | +| 0 | Produce | 9 | 7 | +| 1 | Fetch | 13 | 11 | +| 2 | ListOffsets | 7 | 2 | +| 3 | Metadata | 12 | 4 | +| 8 | OffsetCommit | 8 | 7 | +| 9 | OffsetFetch | 8 | 7 | +| 10 | FindCoordinator | 4 | 2 | +| 11 | JoinGroup | 9 | 5 | +| 12 | Heartbeat | 4 | 3 | +| 13 | LeaveGroup | 5 | 1 | +| 14 | SyncGroup | 5 | 3 | +| 15 | DescribeGroups | 5 | 4 | +| 16 | ListGroups | 4 | 4 | | 17 | SaslHandshake | 1 | 1 | | 18 | ApiVersions | 3 | 3 | -| 19 | CreateTopics | 5 | 4 | -| 20 | DeleteTopics | 3 | 1 | +| 19 | CreateTopics | 7 | 4 | +| 20 | DeleteTopics | 6 | 1 | | 21 | DeleteRecords | 2 | 1 | | 22 | InitProducerId | 4 | 4 | -| 24 | AddPartitionsToTxn | 1 | 0 | -| 25 | AddOffsetsToTxn | 1 | 0 | -| 26 | EndTxn | 1 | 1 | -| 28 | TxnOffsetCommit | 2 | 0 | -| 32 | DescribeConfigs | 2 | 1 | -| 33 | AlterConfigs | 1 | 0 | -| 36 | SaslAuthenticate | 1 | 0 | -| 37 | CreatePartitions | 1 | 0 | +| 24 | AddPartitionsToTxn | 3 | 0 | +| 25 | AddOffsetsToTxn | 3 | 0 | +| 26 | EndTxn | 3 | 1 | +| 28 | TxnOffsetCommit | 3 | 3 | +| 32 | DescribeConfigs | 4 | 1 | +| 33 | AlterConfigs | 2 | 0 | +| 36 | SaslAuthenticate | 2 | 0 | +| 37 | CreatePartitions | 3 | 0 | | 42 | DeleteGroups | 2 | 1 | | 47 | OffsetDelete | 0 | 0 | diff -Nru librdkafka-1.9.2/mklove/modules/configure.libcurl librdkafka-2.0.2/mklove/modules/configure.libcurl --- librdkafka-1.9.2/mklove/modules/configure.libcurl 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/mklove/modules/configure.libcurl 2023-01-20 09:14:36.000000000 +0000 @@ -45,8 +45,8 @@ function install_source { local name=$1 local destdir=$2 - local ver=7.84.0 - local checksum="3c6893d38d054d4e378267166858698899e9d87258e8ff1419d020c395384535" + local ver=7.86.0 + local checksum="3dfdd39ba95e18847965cd3051ea6d22586609d9011d91df7bc5521288987a82" echo "### Installing $name $ver from source to $destdir" if [[ ! -f Makefile ]]; then diff -Nru librdkafka-1.9.2/mklove/modules/configure.libssl librdkafka-2.0.2/mklove/modules/configure.libssl --- librdkafka-1.9.2/mklove/modules/configure.libssl 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/mklove/modules/configure.libssl 2023-01-20 09:14:36.000000000 +0000 @@ -81,15 +81,29 @@ function libcrypto_install_source { local name=$1 local destdir=$2 - local ver=1.1.1q - local checksum="d7939ce614029cdff0b6c20f0e2e5703158a489a72b2507b8bd51bf8c8fd10ca" + local ver=3.0.7 + local checksum="83049d042a260e696f62406ac5c08bf706fd84383f945cf21bd61e9ed95c396e" local url=https://www.openssl.org/source/openssl-${ver}.tar.gz - local conf_args="--prefix=/usr --openssldir=/usr/lib/ssl no-shared no-zlib no-deprecated" + local conf_args="--prefix=/usr --openssldir=/usr/lib/ssl no-shared no-zlib" + if [[ $ver == 1.0.* ]]; then conf_args="${conf_args} no-krb5" fi + if [[ $ver == 3.* ]]; then + # Silence OpenSSL 3.0.0 deprecation warnings since they'll make + # -Werror fail. 
+ mkl_define_set "libcrypto" OPENSSL_SUPPRESS_DEPRECATED + # Make sure legacy provider (et.al) are built-in, since we're building + # a static library we don't want to rely on dynamically loaded modules. + conf_args="${conf_args} no-module" + else + # OpenSSL 3 deprecates ENGINE support, but we still need it, so only + # add no-deprecated to non-3.x builds. + conf_args="${conf_args} no-deprecated" + fi + # 1.1.1q tests fail to build on OSX/M1, so disable them. if [[ $MKL_DISTRO == osx && $ver == 1.1.1q ]]; then conf_args="${conf_args} no-tests" diff -Nru librdkafka-1.9.2/mklove/modules/configure.libzstd librdkafka-2.0.2/mklove/modules/configure.libzstd --- librdkafka-1.9.2/mklove/modules/configure.libzstd 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/mklove/modules/configure.libzstd 2023-01-20 09:14:36.000000000 +0000 @@ -42,8 +42,8 @@ function install_source { local name=$1 local destdir=$2 - local ver=1.5.0 - local checksum="5194fbfa781fcf45b98c5e849651aa7b3b0a008c6b72d4a0db760f3002291e94" + local ver=1.5.2 + local checksum="7c42d56fac126929a6a85dbc73ff1db2411d04f104fae9bdea51305663a83fd0" echo "### Installing $name $ver from source to $destdir" if [[ ! -f Makefile ]]; then diff -Nru librdkafka-1.9.2/mklove/modules/configure.zlib librdkafka-2.0.2/mklove/modules/configure.zlib --- librdkafka-1.9.2/mklove/modules/configure.zlib 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/mklove/modules/configure.zlib 2023-01-20 09:14:36.000000000 +0000 @@ -42,13 +42,13 @@ function install_source { local name=$1 local destdir=$2 - local ver=1.2.12 - local checksum="91844808532e5ce316b3c010929493c0244f3d37593afd6de04f71821d5136d9" + local ver=1.2.13 + local checksum="b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30" echo "### Installing $name $ver from source to $destdir" if [[ ! -f Makefile ]]; then mkl_download_archive \ - "https://zlib.net/zlib-${ver}.tar.gz" \ + "https://zlib.net/fossils/zlib-${ver}.tar.gz" \ "256" \ "$checksum" || return 1 fi diff -Nru librdkafka-1.9.2/packaging/mingw-w64/configure-build-msys2-mingw.sh librdkafka-2.0.2/packaging/mingw-w64/configure-build-msys2-mingw.sh --- librdkafka-1.9.2/packaging/mingw-w64/configure-build-msys2-mingw.sh 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/mingw-w64/configure-build-msys2-mingw.sh 2023-01-20 09:14:36.000000000 +0000 @@ -8,11 +8,6 @@ -D WITHOUT_WIN32_CONFIG=ON \ -D RDKAFKA_BUILD_EXAMPLES=ON \ -D RDKAFKA_BUILD_TESTS=ON \ - -D WITH_LIBDL=OFF \ - -D WITH_PLUGINS=OFF \ - -D WITH_SASL=ON \ - -D WITH_SSL=ON \ - -D WITH_ZLIB=OFF \ -D RDKAFKA_BUILD_STATIC=OFF \ -D CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=TRUE . @@ -22,4 +17,5 @@ cd tests cp ../dest/bin/librdkafka.dll ./ cp ../dest/bin/librdkafka++.dll ./ -./test-runner.exe -l -Q -p1 0000 +CI=true ./test-runner.exe -l -Q +cd .. 
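A note on the configure.libssl change above: OpenSSL 3.x deprecates the ENGINE API, but librdkafka still compiles against it, so the module now defines `OPENSSL_SUPPRESS_DEPRECATED` to keep `-Werror` builds clean. The snippet below is only an editorial illustration of that mechanism (the macro must be visible before any OpenSSL header is included); the helper function is hypothetical and not taken from librdkafka's sources.

```c
/* Editorial illustration (not from the diff): ENGINE_* calls are deprecated
 * in OpenSSL 3.x and would emit warnings that fail the build under -Werror.
 * Defining OPENSSL_SUPPRESS_DEPRECATED before including OpenSSL headers
 * suppresses those warnings. */
#define OPENSSL_SUPPRESS_DEPRECATED 1

#include <stddef.h>
#include <openssl/engine.h>

/* Hypothetical helper: load a dynamic OpenSSL engine from a shared object. */
static ENGINE *load_dynamic_engine(const char *so_path) {
        ENGINE *e = ENGINE_by_id("dynamic"); /* deprecated in OpenSSL 3.x */
        if (!e)
                return NULL;
        if (!ENGINE_ctrl_cmd_string(e, "SO_PATH", so_path, 0) ||
            !ENGINE_ctrl_cmd_string(e, "LOAD", NULL, 0)) {
                ENGINE_free(e);
                return NULL;
        }
        return e;
}
```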
diff -Nru librdkafka-1.9.2/packaging/mingw-w64/configure-build-msys2-mingw-static.sh librdkafka-2.0.2/packaging/mingw-w64/configure-build-msys2-mingw-static.sh --- librdkafka-1.9.2/packaging/mingw-w64/configure-build-msys2-mingw-static.sh 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/mingw-w64/configure-build-msys2-mingw-static.sh 2023-01-20 09:14:36.000000000 +0000 @@ -14,11 +14,11 @@ # Bundle all the static dependencies with the static lib we just built mkdir mergescratch pushd mergescratch -cp /C/tools/msys64/mingw64/lib/libzstd.a ./ -cp /C/tools/msys64/mingw64/lib/libcrypto.a ./ -cp /C/tools/msys64/mingw64/lib/liblz4.a ./ -cp /C/tools/msys64/mingw64/lib/libssl.a ./ -cp /C/tools/msys64/mingw64/lib/libz.a ./ +cp /C/msys64/mingw64/lib/libzstd.a ./ +cp /C/msys64/mingw64/lib/libcrypto.a ./ +cp /C/msys64/mingw64/lib/liblz4.a ./ +cp /C/msys64/mingw64/lib/libssl.a ./ +cp /C/msys64/mingw64/lib/libz.a ./ cp ../src/librdkafka.a ./ # Have to rename because ar won't work with + in the name diff -Nru librdkafka-1.9.2/packaging/mingw-w64/semaphoreci-build.sh librdkafka-2.0.2/packaging/mingw-w64/semaphoreci-build.sh --- librdkafka-1.9.2/packaging/mingw-w64/semaphoreci-build.sh 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/packaging/mingw-w64/semaphoreci-build.sh 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,38 @@ +#!/bin/bash +# + +set -ex + +if [[ $1 == "--static" ]]; then + linkage="static" + shift +else +linkage="dynamic" +fi + +if [[ -z $1 ]]; then + echo "Usage: $0 [--static] " + exit 1 +fi + +archive="${PWD}/$1" + +source ./packaging/mingw-w64/travis-before-install.sh + +if [[ $linkage == "static" ]]; then + ./packaging/mingw-w64/configure-build-msys2-mingw-static.sh +else + ./packaging/mingw-w64/configure-build-msys2-mingw.sh +fi + + +./packaging/mingw-w64/run-tests.sh + +pushd dest +tar cvzf $archive . +sha256sum $archive +popd + + + + diff -Nru librdkafka-1.9.2/packaging/mingw-w64/travis-before-install.sh librdkafka-2.0.2/packaging/mingw-w64/travis-before-install.sh --- librdkafka-1.9.2/packaging/mingw-w64/travis-before-install.sh 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/mingw-w64/travis-before-install.sh 2023-01-20 09:14:36.000000000 +0000 @@ -2,30 +2,19 @@ set -e -# Slightly modified from: -# https://docs.travis-ci.com/user/reference/windows/#how-do-i-use-msys2 -case $TRAVIS_OS_NAME in - windows) - [[ ! 
-f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64 - choco uninstall -y mingw - choco install -y msys2 - - export msys2='cmd //C RefreshEnv.cmd ' - export msys2+='& set MSYS=winsymlinks:nativestrict ' - export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start' - export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --" - export msys2+=" -msys2 -c "\"\$@"\" --" - - # Have to update pacman first or choco upgrade will failure due to migration - # to zstd instead of xz compression - $msys2 pacman -Sy --noconfirm pacman - choco upgrade --no-progress -y msys2 - - ## Install more MSYS2 packages from https://packages.msys2.org/base here - $msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-gcc mingw-w64-x86_64-make mingw-w64-x86_64-cmake mingw-w64-x86_64-openssl mingw-w64-x86_64-lz4 mingw-w64-x86_64-zstd - - taskkill //IM gpg-agent.exe //F || true # https://travis-ci.community/t/4967 - export PATH=/C/tools/msys64/mingw64/bin:$PATH - export MAKE=mingw32-make # so that Autotools can find it - ;; -esac +export msys2='cmd //C RefreshEnv.cmd ' +export msys2+='& set MSYS=winsymlinks:nativestrict ' +export msys2+='& C:\\msys64\\msys2_shell.cmd -defterm -no-start' +export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --" +export msys2+=" -msys2 -c "\"\$@"\" --" + +# Have to update pacman first or choco upgrade will failure due to migration +# to zstd instead of xz compression +$msys2 pacman -Sy --noconfirm pacman + +## Install more MSYS2 packages from https://packages.msys2.org/base here +$msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-gcc mingw-w64-x86_64-make mingw-w64-x86_64-cmake mingw-w64-x86_64-openssl mingw-w64-x86_64-lz4 mingw-w64-x86_64-zstd + +taskkill //IM gpg-agent.exe //F || true # https://travis-ci.community/t/4967 +export PATH=/C/msys64/mingw64/bin:$PATH +export MAKE=mingw32-make # so that Autotools can find it diff -Nru librdkafka-1.9.2/packaging/nuget/nugetpackage.py librdkafka-2.0.2/packaging/nuget/nugetpackage.py --- librdkafka-1.9.2/packaging/nuget/nugetpackage.py 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/packaging/nuget/nugetpackage.py 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,285 @@ +#!/usr/bin/env python3 +# +# Create NuGet package +# + +import os +import tempfile +import shutil +import subprocess +from packaging import Package, Mapping + + +class NugetPackage (Package): + """ All platforms, archs, et.al, are bundled into one set of + NuGet output packages: "main", redist and symbols """ + + # See .semamphore/semaphore.yml for where these are built. 
+ mappings = [ + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafka.h', + 'build/native/include/librdkafka/rdkafka.h'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafkacpp.h', + 'build/native/include/librdkafka/rdkafkacpp.h'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafka_mock.h', + 'build/native/include/librdkafka/rdkafka_mock.h'), + + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/README.md', + 'README.md'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/CONFIGURATION.md', + 'CONFIGURATION.md'), + Mapping({'arch': 'x64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/LICENSES.txt', + 'LICENSES.txt'), + + # OSX x64 + Mapping({'arch': 'x64', + 'plat': 'osx'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.dylib', + 'runtimes/osx-x64/native/librdkafka.dylib'), + # OSX arm64 + Mapping({'arch': 'arm64', + 'plat': 'osx'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.1.dylib', + 'runtimes/osx-arm64/native/librdkafka.dylib'), + + # Linux glibc centos6 x64 with GSSAPI + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos6', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-x64/native/librdkafka.so'), + # Linux glibc centos6 x64 without GSSAPI (no external deps) + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos6', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-x64/native/centos6-librdkafka.so'), + # Linux glibc centos7 x64 with GSSAPI + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos7', + 'lnk': 'std'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-x64/native/centos7-librdkafka.so'), + # Linux glibc centos7 arm64 without GSSAPI (no external deps) + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'centos7', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-arm64/native/librdkafka.so'), + + # Linux musl alpine x64 without GSSAPI (no external deps) + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka.so.1', + 'runtimes/linux-x64/native/alpine-librdkafka.so'), + + # Common Win runtime + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'msvcr140.zip', + 'vcruntime140.dll', + 'runtimes/win-x64/native/vcruntime140.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'msvcr140.zip', + 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'), + # matches librdkafka.redist.{VER}.nupkg + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/librdkafka.dll', + 'runtimes/win-x64/native/librdkafka.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/librdkafkacpp.dll', + 'runtimes/win-x64/native/librdkafkacpp.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/libcrypto-3-x64.dll', + 'runtimes/win-x64/native/libcrypto-3-x64.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/libssl-3-x64.dll', + 
'runtimes/win-x64/native/libssl-3-x64.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/zlib1.dll', + 'runtimes/win-x64/native/zlib1.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/zstd.dll', + 'runtimes/win-x64/native/zstd.dll'), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/x64/Release/libcurl.dll', + 'runtimes/win-x64/native/libcurl.dll'), + # matches librdkafka.{VER}.nupkg + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka*.nupkg', + 'build/native/lib/v142/x64/Release/librdkafka.lib', + 'build/native/lib/win/x64/win-x64-Release/v142/librdkafka.lib', # noqa: E501 + artifact_fname_excludes=['redist', 'symbols']), + Mapping({'arch': 'x64', + 'plat': 'win'}, + 'librdkafka*.nupkg', + 'build/native/lib/v142/x64/Release/librdkafkacpp.lib', + 'build/native/lib/win/x64/win-x64-Release/v142/librdkafkacpp.lib', # noqa: E501 + artifact_fname_excludes=['redist', 'symbols']), + + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'msvcr140.zip', + 'vcruntime140.dll', + 'runtimes/win-x86/native/vcruntime140.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'msvcr140.zip', + 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'), + # matches librdkafka.redist.{VER}.nupkg + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/librdkafka.dll', + 'runtimes/win-x86/native/librdkafka.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/librdkafkacpp.dll', + 'runtimes/win-x86/native/librdkafkacpp.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/libcrypto-3.dll', + 'runtimes/win-x86/native/libcrypto-3.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/libssl-3.dll', + 'runtimes/win-x86/native/libssl-3.dll'), + + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/zlib1.dll', + 'runtimes/win-x86/native/zlib1.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/zstd.dll', + 'runtimes/win-x86/native/zstd.dll'), + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka.redist*', + 'build/native/bin/v142/Win32/Release/libcurl.dll', + 'runtimes/win-x86/native/libcurl.dll'), + + # matches librdkafka.{VER}.nupkg + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka*.nupkg', + 'build/native/lib/v142/Win32/Release/librdkafka.lib', + 'build/native/lib/win/x86/win-x86-Release/v142/librdkafka.lib', # noqa: E501 + artifact_fname_excludes=['redist', 'symbols']), + + Mapping({'arch': 'x86', + 'plat': 'win'}, + 'librdkafka*.nupkg', + 'build/native/lib/v142/Win32/Release/librdkafkacpp.lib', + 'build/native/lib/win/x86/win-x86-Release/v142/librdkafkacpp.lib', # noqa: E501 + artifact_fname_excludes=['redist', 'symbols']) + ] + + def __init__(self, version, arts): + if version.startswith('v'): + version = version[1:] # Strip v prefix + super(NugetPackage, self).__init__(version, arts) + + def cleanup(self): + if os.path.isdir(self.stpath): + shutil.rmtree(self.stpath) + + def build(self, buildtype): + """ Build single NuGet package for all its artifacts. """ + + # NuGet removes the prefixing v from the version. 
+ vless_version = self.kv['version'] + if vless_version[0] == 'v': + vless_version = vless_version[1:] + + self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype, + dir=".") + + self.render('librdkafka.redist.nuspec') + self.copy_template('librdkafka.redist.targets', + destpath=os.path.join('build', 'native')) + self.copy_template('librdkafka.redist.props', + destpath='build') + + # Generate template tokens for artifacts + for a in self.arts.artifacts: + if 'bldtype' not in a.info: + a.info['bldtype'] = 'release' + + a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'), + a.info.get('arch'), + a.info.get('bldtype')) + if 'toolset' not in a.info: + a.info['toolset'] = 'v142' + + # Apply mappings and extract files + self.apply_mappings() + + print('Tree extracted to %s' % self.stpath) + + # After creating a bare-bone nupkg layout containing the artifacts + # and some spec and props files, call the 'nuget' utility to + # make a proper nupkg of it (with all the metadata files). + subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % # noqa: E501 + (os.path.join(self.stpath, + 'librdkafka.redist.nuspec'), + self.stpath), shell=True) + + return 'librdkafka.redist.%s.nupkg' % vless_version diff -Nru librdkafka-1.9.2/packaging/nuget/packaging.py librdkafka-2.0.2/packaging/nuget/packaging.py --- librdkafka-1.9.2/packaging/nuget/packaging.py 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/nuget/packaging.py 2023-01-20 09:14:36.000000000 +0000 @@ -1,26 +1,31 @@ #!/usr/bin/env python3 # -# NuGet packaging script. -# Assembles a NuGet package using CI artifacts in S3 -# and calls nuget (in docker) to finalize the package. +# Packaging script. +# Assembles packages using CI artifacts. # import sys import re import os -import tempfile import shutil -import subprocess from fnmatch import fnmatch from string import Template -import boto3 from zfile import zfile +import boto3 import magic if sys.version_info[0] < 3: - from urllib import unquote + from urllib import unquote as _unquote else: - from urllib.parse import unquote + from urllib.parse import unquote as _unquote + + +def unquote(path): + # Removes URL escapes, and normalizes the path by removing ./. + path = _unquote(path) + if path[:2] == './': + return path[2:] + return path # Rename token values @@ -79,12 +84,15 @@ # p - project (e.g., "confluent-kafka-python") # bld - builder (e.g., "travis") # plat - platform ("osx", "linux", ..) +# dist - distro or runtime ("centos6", "mingw", "msvcr", "alpine", ..). # arch - arch ("x64", ..) # tag - git tag # sha - git sha # bid - builder's build-id # bldtype - Release, Debug (appveyor) -# lnk - std, static +# lnk - Linkage ("std", "static", "all" (both std and static)) +# extra - Extra build options, typically "gssapi" (for cyrus-sasl linking). 
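+# These key-value attributes are encoded in the artifact's storage path as '__'-separated "key-value" tokens, as shown in the example below.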
+ # # Example: # librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz @@ -116,7 +124,7 @@ else: # Assign the map and convert all keys to lower case self.info = {k.lower(): v for k, v in info.items()} - # Rename values, e.g., 'plat':'linux' to 'plat':'debian' + # Rename values, e.g., 'plat':'windows' to 'plat':'win' for k, v in self.info.items(): rdict = rename_vals.get(k, None) if rdict is not None: @@ -203,7 +211,7 @@ unmatched = list() for m, v in self.match.items(): if m not in info or info[m] != v: - unmatched.append(m) + unmatched.append(f"{m} = {v}") # Make sure all matches were satisfied, unless this is a # common artifact. @@ -257,16 +265,50 @@ self.collect_single(f, req_tag) +class Mapping (object): + """ Maps/matches a file in an input release artifact to + the output location of the package, based on attributes and paths. """ + + def __init__(self, attributes, artifact_fname_glob, path_in_artifact, + output_pkg_path=None, artifact_fname_excludes=[]): + """ + @param attributes A dict of artifact attributes that must match. + If an attribute name (dict key) is prefixed + with "!" (e.g., "!plat") then the attribute + must not match. + @param artifact_fname_glob Match artifacts with this filename glob. + @param path_in_artifact On match, extract this file in the artifact,.. + @param output_pkg_path ..and write it to this location in the package. + Defaults to path_in_artifact. + @param artifact_fname_excludes Exclude artifacts matching these + filenames. + + Pass a list of Mapping objects to FIXME to perform all mappings. + """ + super(Mapping, self).__init__() + self.attributes = attributes + self.fname_glob = artifact_fname_glob + self.input_path = path_in_artifact + if output_pkg_path is None: + self.output_path = self.input_path + else: + self.output_path = output_pkg_path + self.name = self.output_path + self.fname_excludes = artifact_fname_excludes + + def __str__(self): + return self.name + + class Package (object): """ Generic Package class A Package is a working container for one or more output packages for a specific package type (e.g., nuget) """ - def __init__(self, version, arts, ptype): + def __init__(self, version, arts): super(Package, self).__init__() self.version = version self.arts = arts - self.ptype = ptype # These may be overwritten by specific sub-classes: self.artifacts = arts.artifacts # Staging path, filled in later. @@ -286,10 +328,6 @@ """ Optional cleanup routine for removing temporary files, etc. """ pass - def verify(self, path): - """ Optional post-build package verifier """ - pass - def render(self, fname, destpath='.'): """ Render template in file fname and save to destpath/fname, where destpath is relative to stpath """ @@ -321,472 +359,41 @@ self.add_file(outf) + def apply_mappings(self): + """ Applies a list of Mapping to match and extract files from + matching artifacts. If any of the listed Mappings can not be + fulfilled an exception is raised. """ -class NugetPackage (Package): - """ All platforms, archs, et.al, are bundled into one set of - NuGet output packages: "main", redist and symbols """ - - def __init__(self, version, arts): - if version.startswith('v'): - version = version[1:] # Strip v prefix - super(NugetPackage, self).__init__(version, arts, "nuget") - - def cleanup(self): - if os.path.isdir(self.stpath): - shutil.rmtree(self.stpath) - - def build(self, buildtype): - """ Build single NuGet package for all its artifacts. 
""" - - # NuGet removes the prefixing v from the version. - vless_version = self.kv['version'] - if vless_version[0] == 'v': - vless_version = vless_version[1:] - - self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype, - dir=".") - - self.render('librdkafka.redist.nuspec') - self.copy_template('librdkafka.redist.targets', - destpath=os.path.join('build', 'native')) - self.copy_template('librdkafka.redist.props', - destpath='build') - - # Generate template tokens for artifacts - for a in self.arts.artifacts: - if 'bldtype' not in a.info: - a.info['bldtype'] = 'release' - - a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'), - a.info.get('arch'), - a.info.get('bldtype')) - if 'toolset' not in a.info: - a.info['toolset'] = 'v142' - - mappings = [ - [{'arch': 'x64', - 'plat': 'linux', - 'lnk': 'std', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './include/librdkafka/rdkafka.h', - 'build/native/include/librdkafka/rdkafka.h'], - [{'arch': 'x64', - 'plat': 'linux', - 'lnk': 'std', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './include/librdkafka/rdkafkacpp.h', - 'build/native/include/librdkafka/rdkafkacpp.h'], - [{'arch': 'x64', - 'plat': 'linux', - 'lnk': 'std', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './include/librdkafka/rdkafka_mock.h', - 'build/native/include/librdkafka/rdkafka_mock.h'], - - [{'arch': 'x64', - 'plat': 'linux', - 'lnk': 'std', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './share/doc/librdkafka/README.md', - 'README.md'], - [{'arch': 'x64', - 'plat': 'linux', - 'lnk': 'std', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './share/doc/librdkafka/CONFIGURATION.md', - 'CONFIGURATION.md'], - # The above x64-linux gcc job generates a bad LICENSES.txt file, - # so we use the one from the osx job instead. - [{'arch': 'x64', - 'plat': 'osx', - 'lnk': 'std', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './share/doc/librdkafka/LICENSES.txt', - 'LICENSES.txt'], - - # Travis OSX x64 build - [{'arch': 'x64', 'plat': 'osx', - 'fname_glob': 'librdkafka-clang.tar.gz'}, - './lib/librdkafka.dylib', - 'runtimes/osx-x64/native/librdkafka.dylib'], - # Travis OSX arm64 build - [{'arch': 'arm64', 'plat': 'osx', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/librdkafka.1.dylib', - 'runtimes/osx-arm64/native/librdkafka.dylib'], - # Travis Manylinux build - [{'arch': 'x64', - 'plat': 'linux', - 'fname_glob': 'librdkafka-manylinux*x86_64.tgz'}, - './lib/librdkafka.so.1', - 'runtimes/linux-x64/native/centos6-librdkafka.so'], - # Travis Ubuntu 14.04 build - [{'arch': 'x64', - 'plat': 'linux', - 'lnk': 'std', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/librdkafka.so.1', - 'runtimes/linux-x64/native/librdkafka.so'], - # Travis CentOS 7 RPM build - [{'arch': 'x64', - 'plat': 'linux', - 'fname_glob': 'librdkafka1*el7.x86_64.rpm'}, - './usr/lib64/librdkafka.so.1', - 'runtimes/linux-x64/native/centos7-librdkafka.so'], - # Travis Alpine build - [{'arch': 'x64', 'plat': 'linux', - 'fname_glob': 'alpine-librdkafka.tgz'}, - 'librdkafka.so.1', - 'runtimes/linux-x64/native/alpine-librdkafka.so'], - # Travis arm64 Linux build - [{'arch': 'arm64', 'plat': 'linux', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/librdkafka.so.1', - 'runtimes/linux-arm64/native/librdkafka.so'], - - # Common Win runtime - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, - 'vcruntime140.dll', - 'runtimes/win-x64/native/vcruntime140.dll'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, - 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'], - # matches 
librdkafka.redist.{VER}.nupkg - [{'arch': 'x64', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/x64/Release/librdkafka.dll', - 'runtimes/win-x64/native/librdkafka.dll'], - [{'arch': 'x64', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/x64/Release/librdkafkacpp.dll', - 'runtimes/win-x64/native/librdkafkacpp.dll'], - [{'arch': 'x64', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/x64/Release/libcrypto-1_1-x64.dll', - 'runtimes/win-x64/native/libcrypto-1_1-x64.dll'], - [{'arch': 'x64', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/x64/Release/libssl-1_1-x64.dll', - 'runtimes/win-x64/native/libssl-1_1-x64.dll'], - [{'arch': 'x64', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/x64/Release/zlib1.dll', - 'runtimes/win-x64/native/zlib1.dll'], - [{'arch': 'x64', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/x64/Release/zstd.dll', - 'runtimes/win-x64/native/zstd.dll'], - [{'arch': 'x64', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/x64/Release/libcurl.dll', - 'runtimes/win-x64/native/libcurl.dll'], - # matches librdkafka.{VER}.nupkg - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', - 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v142/x64/Release/librdkafka.lib', - 'build/native/lib/win/x64/win-x64-Release/v142/librdkafka.lib'], - [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', - 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v142/x64/Release/librdkafkacpp.lib', - 'build/native/lib/win/x64/win-x64-Release/v142/librdkafkacpp.lib'], # noqa: E501 - - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, - 'vcruntime140.dll', - 'runtimes/win-x86/native/vcruntime140.dll'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, - 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'], - # matches librdkafka.redist.{VER}.nupkg - [{'arch': 'x86', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/Win32/Release/librdkafka.dll', - 'runtimes/win-x86/native/librdkafka.dll'], - [{'arch': 'x86', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/Win32/Release/librdkafkacpp.dll', - 'runtimes/win-x86/native/librdkafkacpp.dll'], - [{'arch': 'x86', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/Win32/Release/libcrypto-1_1.dll', - 'runtimes/win-x86/native/libcrypto-1_1.dll'], - [{'arch': 'x86', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/Win32/Release/libssl-1_1.dll', - 'runtimes/win-x86/native/libssl-1_1.dll'], - - [{'arch': 'x86', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/Win32/Release/zlib1.dll', - 'runtimes/win-x86/native/zlib1.dll'], - [{'arch': 'x86', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/Win32/Release/zstd.dll', - 'runtimes/win-x86/native/zstd.dll'], - [{'arch': 'x86', - 'plat': 'win', - 'fname_glob': 'librdkafka.redist*'}, - 'build/native/bin/v142/Win32/Release/libcurl.dll', - 'runtimes/win-x86/native/libcurl.dll'], - - # matches librdkafka.{VER}.nupkg - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', - 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v142/Win32/Release/librdkafka.lib', - 
'build/native/lib/win/x86/win-x86-Release/v142/librdkafka.lib'], - [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', - 'fname_excludes': ['redist', 'symbols']}, - 'build/native/lib/v142/Win32/Release/librdkafkacpp.lib', - 'build/native/lib/win/x86/win-x86-Release/v142/librdkafkacpp.lib'] - ] - - for m in mappings: - attributes = m[0] - fname_glob = attributes['fname_glob'] - del attributes['fname_glob'] - fname_excludes = [] - if 'fname_excludes' in attributes: - fname_excludes = attributes['fname_excludes'] - del attributes['fname_excludes'] - - outf = os.path.join(self.stpath, m[2]) - member = m[1] - - found = False - # Try all matching artifacts until we find the wanted file (member) - for a in self.arts.artifacts: - attr_match = True - for attr in attributes: - if a.info.get(attr, None) != attributes[attr]: - attr_match = False - break - - if not attr_match: - continue - - if not fnmatch(a.fname, fname_glob): - continue - - for exclude in fname_excludes: - if exclude in a.fname: - continue - - try: - zfile.ZFile.extract(a.lpath, member, outf) - except KeyError: - continue - except Exception as e: - raise Exception( - 'file not found in archive %s: %s. Files in archive are: %s' % # noqa: E501 - (a.lpath, e, zfile.ZFile( - a.lpath).getnames())) - - # Check that the file type matches. - if magic_mismatch(outf, a): - os.unlink(outf) - continue - - found = True - break - - if not found: - raise MissingArtifactError( - 'unable to find artifact with tags %s matching "%s" for file "%s"' % # noqa: E501 - (str(attributes), fname_glob, member)) + assert self.mappings + assert len(self.mappings) > 0 - print('Tree extracted to %s' % self.stpath) - - # After creating a bare-bone nupkg layout containing the artifacts - # and some spec and props files, call the 'nuget' utility to - # make a proper nupkg of it (with all the metadata files). 
- subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % # noqa: E501 - (os.path.join(self.stpath, - 'librdkafka.redist.nuspec'), - self.stpath), shell=True) - - return 'librdkafka.redist.%s.nupkg' % vless_version - - def verify(self, path): - """ Verify package """ - expect = [ - "librdkafka.redist.nuspec", - "README.md", - "CONFIGURATION.md", - "LICENSES.txt", - "build/librdkafka.redist.props", - "build/native/librdkafka.redist.targets", - "build/native/include/librdkafka/rdkafka.h", - "build/native/include/librdkafka/rdkafkacpp.h", - "build/native/include/librdkafka/rdkafka_mock.h", - "build/native/lib/win/x64/win-x64-Release/v142/librdkafka.lib", - "build/native/lib/win/x64/win-x64-Release/v142/librdkafkacpp.lib", - "build/native/lib/win/x86/win-x86-Release/v142/librdkafka.lib", - "build/native/lib/win/x86/win-x86-Release/v142/librdkafkacpp.lib", - "runtimes/linux-x64/native/centos7-librdkafka.so", - "runtimes/linux-x64/native/centos6-librdkafka.so", - "runtimes/linux-x64/native/alpine-librdkafka.so", - "runtimes/linux-x64/native/librdkafka.so", - "runtimes/linux-arm64/native/librdkafka.so", - "runtimes/osx-x64/native/librdkafka.dylib", - "runtimes/osx-arm64/native/librdkafka.dylib", - # win x64 - "runtimes/win-x64/native/librdkafka.dll", - "runtimes/win-x64/native/librdkafkacpp.dll", - "runtimes/win-x64/native/vcruntime140.dll", - "runtimes/win-x64/native/msvcp140.dll", - "runtimes/win-x64/native/libcrypto-1_1-x64.dll", - "runtimes/win-x64/native/libssl-1_1-x64.dll", - "runtimes/win-x64/native/zlib1.dll", - "runtimes/win-x64/native/zstd.dll", - "runtimes/win-x64/native/libcurl.dll", - # win x86 - "runtimes/win-x86/native/librdkafka.dll", - "runtimes/win-x86/native/librdkafkacpp.dll", - "runtimes/win-x86/native/vcruntime140.dll", - "runtimes/win-x86/native/msvcp140.dll", - "runtimes/win-x86/native/libcrypto-1_1.dll", - "runtimes/win-x86/native/libssl-1_1.dll", - "runtimes/win-x86/native/zlib1.dll", - "runtimes/win-x86/native/zstd.dll", - "runtimes/win-x86/native/libcurl.dll"] - - missing = list() - with zfile.ZFile(path, 'r') as zf: - print('Verifying %s:' % path) - - # Zipfiles may url-encode filenames, unquote them before matching. - pkgd = [unquote(x) for x in zf.getnames()] - missing = [x for x in expect if x not in pkgd] - - if len(missing) > 0: - print( - 'Missing files in package %s:\n%s' % - (path, '\n'.join(missing))) - return False - - print('OK - %d expected files found' % len(expect)) - return True - - -class StaticPackage (Package): - """ Create a package with all static libraries """ - - # Only match statically linked artifacts - match = {'lnk': 'static'} - - def __init__(self, version, arts): - super(StaticPackage, self).__init__(version, arts, "static") - - def cleanup(self): - if os.path.isdir(self.stpath): - shutil.rmtree(self.stpath) - - def build(self, buildtype): - """ Build single package for all artifacts. 
""" - - self.stpath = tempfile.mkdtemp(prefix="out-", dir=".") - - mappings = [ - # rdkafka.h - [{'arch': 'x64', - 'plat': 'linux', - 'fname_glob': 'librdkafka-clang.tar.gz'}, - './include/librdkafka/rdkafka.h', - 'rdkafka.h'], - - # LICENSES.txt - [{'arch': 'x64', - 'plat': 'osx', - 'fname_glob': 'librdkafka-clang.tar.gz'}, - './share/doc/librdkafka/LICENSES.txt', - 'LICENSES.txt'], - - # glibc linux static lib and pkg-config file - [{'arch': 'x64', - 'plat': 'linux', - 'fname_glob': 'librdkafka-clang.tar.gz'}, - './lib/librdkafka-static.a', - 'librdkafka_glibc_linux.a'], - [{'arch': 'x64', - 'plat': 'linux', - 'fname_glob': 'librdkafka-clang.tar.gz'}, - './lib/pkgconfig/rdkafka-static.pc', - 'librdkafka_glibc_linux.pc'], - - # musl linux static lib and pkg-config file - [{'arch': 'x64', - 'plat': 'linux', - 'fname_glob': 'alpine-librdkafka.tgz'}, - 'librdkafka-static.a', - 'librdkafka_musl_linux.a'], - [{'arch': 'x64', - 'plat': 'linux', - 'fname_glob': 'alpine-librdkafka.tgz'}, - 'rdkafka-static.pc', - 'librdkafka_musl_linux.pc'], - - # osx x64 static lib and pkg-config file - [{'arch': 'x64', 'plat': 'osx', - 'fname_glob': 'librdkafka-clang.tar.gz'}, - './lib/librdkafka-static.a', - 'librdkafka_darwin_amd64.a'], - [{'arch': 'x64', 'plat': 'osx', - 'fname_glob': 'librdkafka-clang.tar.gz'}, - './lib/pkgconfig/rdkafka-static.pc', - 'librdkafka_darwin_amd64.pc'], - - # osx arm64 static lib and pkg-config file - [{'arch': 'arm64', 'plat': 'osx', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/librdkafka-static.a', - 'librdkafka_darwin_arm64.a'], - [{'arch': 'arm64', 'plat': 'osx', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/pkgconfig/rdkafka-static.pc', - 'librdkafka_darwin_arm64.pc'], - - # win static lib and pkg-config file (mingw) - [{'arch': 'x64', 'plat': 'win', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/librdkafka-static.a', 'librdkafka_windows.a'], - [{'arch': 'x64', 'plat': 'win', - 'fname_glob': 'librdkafka-gcc.tar.gz'}, - './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_windows.pc'], - ] - - for m in mappings: - attributes = m[0].copy() - attributes.update(self.match) - fname_glob = attributes['fname_glob'] - del attributes['fname_glob'] - fname_excludes = [] - if 'fname_excludes' in attributes: - fname_excludes = attributes['fname_excludes'] - del attributes['fname_excludes'] + for m in self.mappings: artifact = None for a in self.arts.artifacts: found = True - for attr in attributes: - if attr not in a.info or a.info[attr] != attributes[attr]: - found = False - break + for attr in m.attributes: + if attr[0] == '!': + # Require attribute NOT to match + origattr = attr + attr = attr[1:] + + if attr in a.info and \ + a.info[attr] != m.attributes[origattr]: + found = False + break + else: + # Require attribute to match + if attr not in a.info or \ + a.info[attr] != m.attributes[attr]: + found = False + break - if not fnmatch(a.fname, fname_glob): + if not fnmatch(a.fname, m.fname_glob): found = False - for exclude in fname_excludes: + for exclude in m.fname_excludes: if exclude in a.fname: found = False break @@ -797,45 +404,30 @@ if artifact is None: raise MissingArtifactError( - 'unable to find artifact with tags %s matching "%s"' % - (str(attributes), fname_glob)) + '%s: unable to find artifact with tags %s matching "%s"' % + (m, str(m.attributes), m.fname_glob)) + + output_path = os.path.join(self.stpath, m.output_path) - outf = os.path.join(self.stpath, m[2]) - member = m[1] try: - zfile.ZFile.extract(artifact.lpath, member, outf) - except KeyError as e: + 
zfile.ZFile.extract(artifact.lpath, m.input_path, output_path) +# except KeyError: +# continue + except Exception as e: raise Exception( - 'file not found in archive %s: %s. Files in archive are: %s' % # noqa: E501 - (artifact.lpath, e, zfile.ZFile( - artifact.lpath).getnames())) - - print('Tree extracted to %s' % self.stpath) - - # After creating a bare-bone layout, create a tarball. - outname = "librdkafka-static-bundle-%s.tgz" % self.version - print('Writing to %s' % outname) - subprocess.check_call("(cd %s && tar cvzf ../%s .)" % - (self.stpath, outname), - shell=True) + '%s: file not found in archive %s: %s. Files in archive are:\n%s' % # noqa: E501 + (m, artifact.lpath, e, '\n'.join(zfile.ZFile( + artifact.lpath).getnames()))) + + # Check that the file type matches. + if magic_mismatch(output_path, a): + os.unlink(output_path) + continue - return outname + # All mappings found and extracted. def verify(self, path): - """ Verify package """ - expect = [ - "./rdkafka.h", - "./LICENSES.txt", - "./librdkafka_glibc_linux.a", - "./librdkafka_glibc_linux.pc", - "./librdkafka_musl_linux.a", - "./librdkafka_musl_linux.pc", - "./librdkafka_darwin_amd64.a", - "./librdkafka_darwin_arm64.a", - "./librdkafka_darwin_amd64.pc", - "./librdkafka_darwin_arm64.pc", - "./librdkafka_windows.a", - "./librdkafka_windows.pc"] + """ Verify package content based on the previously defined mappings """ missing = list() with zfile.ZFile(path, 'r') as zf: @@ -843,13 +435,14 @@ # Zipfiles may url-encode filenames, unquote them before matching. pkgd = [unquote(x) for x in zf.getnames()] - missing = [x for x in expect if x not in pkgd] + missing = [x for x in self.mappings if x.output_path not in pkgd] if len(missing) > 0: print( 'Missing files in package %s:\n%s' % - (path, '\n'.join(missing))) + (path, '\n'.join([str(x) for x in missing]))) + print('Actual: %s' % '\n'.join(pkgd)) return False - else: - print('OK - %d expected files found' % len(expect)) - return True + + print('OK - %d expected files found' % len(self.mappings)) + return True diff -Nru librdkafka-1.9.2/packaging/nuget/README.md librdkafka-2.0.2/packaging/nuget/README.md --- librdkafka-1.9.2/packaging/nuget/README.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/nuget/README.md 2023-01-20 09:14:36.000000000 +0000 @@ -1,17 +1,19 @@ -# NuGet package assembly +# Package assembly -This set of scripts collect CI artifacts from S3 and assembles -them into a NuGet package structure staging directory. -The NuGet tool is then run (from within docker) on this staging directory -to create a proper NuGet package (with all the metadata). +This set of scripts collects CI artifacts from a local directory or S3, and +assembles them into a package structure defined by a packaging class in a +staging directory. +For the NugetPackage class the NuGet tool is then run (from within docker) on +this staging directory to create a proper NuGet package (with all the metadata). +The StaticPackage class instead creates a tarball. The finalized nuget package may be uploaded manually to NuGet.org. ## Requirements - * Requires Python 2.x (due to Python 3 compat issues with rpmfile) + * Requires Python 3 * Requires Docker - * Requires private S3 access keys for the librdkafka-ci-packages bucket. + * (if --s3) Requires private S3 access keys for the librdkafka-ci-packages bucket. @@ -20,21 +22,24 @@ 1. Trigger CI builds by creating and pushing a new release (candidate) tag in the librdkafka repo. Make sure the tag is created on the correct branch.
- $ git tag v0.11.0 - $ git push origin v0.11.0 + $ git tag v0.11.0-RC3 + $ git push origin v0.11.0-RC3 2. Wait for CI builds to finish, monitor the builds here: * https://travis-ci.org/edenhill/librdkafka * https://ci.appveyor.com/project/edenhill/librdkafka +Or if using SemaphoreCI, just have the packaging job depend on prior build jobs +in the same pipeline. + 3. On a Linux host, run the release.py script to assemble the NuGet package $ cd packaging/nuget # Specify the tag - $ ./release.py v0.11.0 + $ ./release.py v0.11.0-RC3 # Optionally, if the tag was moved and an exact sha is also required: - # $ ./release.py --sha v0.11.0 + # $ ./release.py --sha v0.11.0-RC3 4. If all artifacts were available the NuGet package will be built and reside in the current directory as librdkafka.redist..nupkg @@ -48,7 +53,7 @@ 7. If you trust this process you can have release.py upload the package automatically to NuGet after building it: - $ ./release.py --retries 100 --upload your-nuget-api.key v0.11.0 + $ ./release.py --retries 100 --upload your-nuget-api.key v0.11.0-RC3 diff -Nru librdkafka-1.9.2/packaging/nuget/release.py librdkafka-2.0.2/packaging/nuget/release.py --- librdkafka-1.9.2/packaging/nuget/release.py 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/nuget/release.py 2023-01-20 09:14:36.000000000 +0000 @@ -11,6 +11,8 @@ import argparse import time import packaging +import nugetpackage +import staticpackage dry_run = False @@ -20,8 +22,8 @@ parser = argparse.ArgumentParser() parser.add_argument( - "--no-s3", - help="Don't collect from S3", + "--s3", + help="Collect artifacts from S3 bucket", action="store_true") parser.add_argument("--dry-run", help="Locate artifacts but don't actually " @@ -40,6 +42,11 @@ help="Also match on this git sha1", default=None) parser.add_argument( + "--ignore-tag", + help="Ignore the artifacts' tag attribute (for devel use only)", + action="store_true", + default=False) + parser.add_argument( "--nuget-version", help="The nuget package version (defaults to same as tag)", default=None) @@ -50,7 +57,7 @@ type=str) parser.add_argument( "--class", - help="Packaging class (see packaging.py)", + help="Packaging class (either NugetPackage or StaticPackage)", default="NugetPackage", dest="pkgclass") parser.add_argument( @@ -66,11 +73,20 @@ if not args.directory: args.directory = 'dl-%s' % args.tag - match = {'tag': args.tag} + match = {} + if not args.ignore_tag: + match['tag'] = args.tag + if args.sha is not None: match['sha'] = args.sha - pkgclass = getattr(packaging, args.pkgclass) + if args.pkgclass == "NugetPackage": + pkgclass = nugetpackage.NugetPackage + elif args.pkgclass == "StaticPackage": + pkgclass = staticpackage.StaticPackage + else: + raise ValueError(f'Unknown packaging class {args.pkgclass}: ' + 'should be one of NugetPackage or StaticPackage') try: match.update(getattr(pkgclass, 'match')) @@ -83,7 +99,7 @@ arts.collect_local('common', req_tag=False) while True: - if not args.no_s3: + if args.s3: arts.collect_s3() arts.collect_local(arts.dlpath) @@ -96,9 +112,10 @@ print(' %s' % a.lpath) print('') - package_version = match['tag'] if args.nuget_version is not None: package_version = args.nuget_version + else: + package_version = args.tag print('') @@ -112,7 +129,7 @@ pkgfile = p.build(buildtype='release') break except packaging.MissingArtifactError as e: - if retries <= 0 or args.no_s3: + if retries <= 0 or not args.s3: if not args.no_cleanup: p.cleanup() raise e diff -Nru librdkafka-1.9.2/packaging/nuget/staticpackage.py 
librdkafka-2.0.2/packaging/nuget/staticpackage.py --- librdkafka-1.9.2/packaging/nuget/staticpackage.py 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/packaging/nuget/staticpackage.py 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# +# Create self-contained static-library tar-ball package +# + +import os +import tempfile +import shutil +import subprocess +from packaging import Package, Mapping + + +class StaticPackage (Package): + """ Create a tar-ball with self-contained static libraries. + These are later imported into confluent-kafka-go. """ + + # Make sure gssapi (cyrus-sasl) is not linked, since that is a + # dynamic linkage, by specifying negative match '!extra': 'gssapi'. + # Except for on OSX where cyrus-sasl is always available, and + # Windows where it is never linked. + # + # Match statically linked artifacts (which are included in 'all' builds) + mappings = [ + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos6', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/include/librdkafka/rdkafka.h', + 'rdkafka.h'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos6', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/share/doc/librdkafka/LICENSES.txt', + 'LICENSES.txt'), + + # glibc linux static lib and pkg-config file + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos6', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_glibc_linux_amd64.a'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'centos6', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_glibc_linux_amd64.pc'), + + # glibc linux arm64 static lib and pkg-config file + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'centos7', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_glibc_linux_arm64.a'), + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'centos7', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_glibc_linux_arm64.pc'), + + # musl linux static lib and pkg-config file + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_musl_linux_amd64.a'), + Mapping({'arch': 'x64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_musl_linux_amd64.pc'), + + # musl linux arm64 static lib and pkg-config file + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_musl_linux_arm64.a'), + Mapping({'arch': 'arm64', + 'plat': 'linux', + 'dist': 'alpine', + 'lnk': 'all', + '!extra': 'gssapi'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_musl_linux_arm64.pc'), + + # osx x64 static lib and pkg-config file + Mapping({'arch': 'x64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_darwin_amd64.a'), + Mapping({'arch': 'x64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_darwin_amd64.pc'), + + # osx arm64 static lib and pkg-config file + 
Mapping({'arch': 'arm64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/librdkafka-static.a', + 'librdkafka_darwin_arm64.a'), + Mapping({'arch': 'arm64', + 'plat': 'osx', + 'lnk': 'all'}, + 'librdkafka.tgz', + './usr/local/lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_darwin_arm64.pc'), + + # win static lib and pkg-config file (mingw) + Mapping({'arch': 'x64', + 'plat': 'win', + 'dist': 'mingw', + 'lnk': 'static'}, + 'librdkafka.tgz', + './lib/librdkafka-static.a', 'librdkafka_windows.a'), + Mapping({'arch': 'x64', + 'plat': 'win', + 'dist': 'mingw', + 'lnk': 'static'}, + 'librdkafka.tgz', + './lib/pkgconfig/rdkafka-static.pc', + 'librdkafka_windows.pc'), + ] + + def __init__(self, version, arts): + super(StaticPackage, self).__init__(version, arts) + + def cleanup(self): + if os.path.isdir(self.stpath): + shutil.rmtree(self.stpath) + + def build(self, buildtype): + """ Build single package for all artifacts. """ + + self.stpath = tempfile.mkdtemp(prefix="out-", dir=".") + + self.apply_mappings() + + print('Tree extracted to %s' % self.stpath) + + # After creating a bare-bone layout, create a tarball. + outname = "librdkafka-static-bundle-%s.tgz" % self.version + print('Writing to %s in %s' % (outname, self.stpath)) + subprocess.check_call("(cd %s && tar cvzf ../%s .)" % + (self.stpath, outname), + shell=True) + + return outname diff -Nru librdkafka-1.9.2/packaging/nuget/templates/librdkafka.redist.nuspec librdkafka-2.0.2/packaging/nuget/templates/librdkafka.redist.nuspec --- librdkafka-1.9.2/packaging/nuget/templates/librdkafka.redist.nuspec 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/nuget/templates/librdkafka.redist.nuspec 2023-01-20 09:14:36.000000000 +0000 @@ -5,14 +5,14 @@ ${version} librdkafka - redistributable Magnus Edenhill, edenhill - Magnus Edenhill, edenhill + Confluent Inc. false - https://github.com/edenhill/librdkafka/blob/master/LICENSES.txt - https://github.com/edenhill/librdkafka + https://github.com/confluentinc/librdkafka/blob/master/LICENSES.txt + https://github.com/confluentinc/librdkafka The Apache Kafka C/C++ client library - redistributable The Apache Kafka C/C++ client library Release of librdkafka - Copyright 2012-2017 + Copyright 2012-2023 native apache kafka librdkafka C C++ nativepackage diff -Nru librdkafka-1.9.2/packaging/RELEASE.md librdkafka-2.0.2/packaging/RELEASE.md --- librdkafka-1.9.2/packaging/RELEASE.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/RELEASE.md 2023-01-20 09:14:36.000000000 +0000 @@ -138,36 +138,13 @@ ## Creating packages -As soon as a tag is pushed the CI systems (Travis and AppVeyor) will -start their builds and eventually upload the packaging artifacts to S3. -Wait until this process is finished by monitoring the two CIs: - - * https://travis-ci.org/edenhill/librdkafka - * https://ci.appveyor.com/project/edenhill/librdkafka - - -### Create NuGet package - -On a Linux host with docker installed, this will also require S3 credentials -to be set up. - - $ cd packaging/nuget - $ python3 -m pip install -r requirements.txt # if necessary - $ ./release.py v0.11.1-RC1 - -Test the generated librdkafka.redist.0.11.1-RC1.nupkg and -then upload it to NuGet manually: - - * https://www.nuget.org/packages/manage/upload - - -### Create static bundle (for Go) - - $ cd packaging/nuget - $ ./release.py --class StaticPackage v0.11.1-RC1 - -Follow the Go client release instructions for updating its bundled librdkafka -version based on the tar ball created here. 
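The package assembly described below is driven by the `Mapping`-based packaging classes added earlier in this diff: each release artifact carries attributes (plat, arch, lnk, dist, extra, ...) parsed from the '__'-separated key-value tokens in its path, and a `Mapping` selects an artifact by matching those attributes, including the negated `'!extra': 'gssapi'` form used by staticpackage.py. The following is a minimal, self-contained sketch of that selection logic under the semantics described in the `Mapping` docstring; the helper names are illustrative only and are not part of the actual packaging module:

    def parse_artifact_key(key):
        # Split an artifact key such as
        # 'p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-all'
        # into an attribute dict: {'p': 'librdkafka', 'plat': 'linux', ...}.
        info = {}
        for token in key.split('__'):
            k, _, v = token.partition('-')
            info[k.lower()] = v
        return info

    def attributes_match(info, attributes):
        # Stand-in for the Mapping/apply_mappings attribute check: every plain
        # attribute must be present and equal, every '!'-prefixed attribute
        # must NOT have the given value.
        for attr, want in attributes.items():
            if attr.startswith('!'):
                if info.get(attr[1:]) == want:
                    return False
            elif info.get(attr) != want:
                return False
        return True

    gssapi = parse_artifact_key(
        'p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-std__extra-gssapi')
    plain = parse_artifact_key(
        'p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-all')
    wanted = {'plat': 'linux', 'dist': 'centos7', 'arch': 'arm64', '!extra': 'gssapi'}
    assert not attributes_match(gssapi, wanted)
    assert attributes_match(plain, wanted)

In the actual code shown in this diff, `apply_mappings()` additionally filters on the artifact filename glob and exclude list, and checks the extracted file's type with `magic_mismatch()` before accepting it.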
+As soon as a tag is pushed the CI system (SemaphoreCI) will start its +build pipeline and eventually upload packaging artifacts to the SemaphoreCI +project artifact store. + +Monitor the Semaphore CI project page to know when the build pipeline +is finished, then download the relevant artifacts for further use, see +*The artifact pipeline* chapter below. ## Publish release on github @@ -194,7 +171,7 @@ ### Homebrew recipe update **Note**: This is typically not needed since homebrew seems to pick up new - release versions quickly enough. + release versions quickly enough. It is recommended to skip this step. The brew-update-pr.sh script automatically pushes a PR to homebrew-core with a patch to update the librdkafka version of the formula. @@ -211,11 +188,124 @@ ### Deb and RPM packaging -Debian and RPM packages are generated by Confluent packaging in a separate -process and the resulting packages are made available on Confluent's -APT and YUM repositories. +Debian and RPM packages are generated by Confluent packaging, called +Independent client releases, which is a separate non-public process and the +resulting packages are made available on Confluent's client deb and rpm +repositories. That process is outside the scope of this document. See the Confluent docs for instructions on how to access these packages: https://docs.confluent.io/current/installation.html + + + + +## Build and release artifacts + +The following chapter explains what, how, and where artifacts are built. +It also outlines where these artifacts are used. + +### So what is an artifact? + +An artifact is a build of the librdkafka library, dynamic/shared and/or static, +with a certain set of external or built-in dependencies, for a specific +architecture and operating system (and sometimes even operating system version). + +If you build librdkafka from source with no special `./configure` arguments +you will end up with: + + * a dynamically linked library (e.g., `librdkafka.so.1`) + with a set of dynamically linked external dependencies (OpenSSL, zlib, etc), + all depending on what dependencies are available on the build host. + + * a static library (`librdkafka.a`) that will have external dependencies + that need to be linked dynamically. There is no way for a static library + to express link dependencies, so an `rdkafka-static.pc` + pkg-config file is also generated that contains linker flags for the external + dependencies. + Those external dependencies are however most likely only available on the + build host, so this static library is not particularly useful for + repackaging purposes (such as for high-level clients using librdkafka). + + * a self-contained static-library (`librdkafka-static.a`) which attempts + to contain static versions of all external dependencies, effectively making + it possible to link just with `librdkafka-static.a` to get all + dependencies needed. + Since the state of static libraries in the various distro and OS packaging + systems is of varying quality and availability, it is usually not possible + for the librdkafka build system (mklove) to generate this completely + self-contained static library simply using dependencies available on the + build system, and the make phase of the build will emit warnings when it + can't bundle all external dependencies due to this. + To circumvent this problem it is possible for the build system (mklove) + to download and build static libraries of all needed external dependencies, + which in turn allows it to create a complete bundle of all dependencies.
+ This results in a `librdkafka-static.a` that has no external dependencies + other than the system libraries (libc, pthreads, rt, etc). + To achieve this you will need to pass + `--install-deps --source-deps-only --enable-static` to + librdkafka's `./configure`. + + * `rdkafka.pc` and `rdkafka-static.pc` pkg-config files that tell + applications and libraries that depend on librdkafka what external + dependencies are needed to successfully link with librdkafka. + This is mainly useful for the dynamic librdkafka library + (`librdkafka.so.1` or `librdkafka.1.dylib` on OSX). + + +**NOTE**: Due to libsasl2/cyrus-sasl's dynamically loaded plugins, it is +not possible for us to provide a self-contained static library with +GSSAPI/Kerberos support. + + + +### The artifact pipeline + +We rely solely on CI systems to build our artifacts; artifacts must never be built +on a non-CI system (e.g., someone's work laptop, some random ec2 instance, etc.). + +The reasons for this are: + + 1. Reproducible builds: we want a well-defined environment that doesn't change + (too much) without notice and that we can rebuild artifacts on at a later + time if required. + 2. Security; these CI systems provide at least some degree of security + guarantees, and they're managed by people who know what they're doing + most of the time. This minimizes the risk of an artifact being silently + compromised due to the developer's laptop being hacked. + 3. Logs; we have build logs for all artifacts, which contain checksums. + This way we can know how an artifact was built, what features were enabled + and what versions of dependencies were used, as well as know that an + artifact has not been tampered with after leaving the CI system. + + +By default the CI jobs are triggered by branch pushes and pull requests +and contain a set of jobs to validate that the changes that were pushed do +not break compilation or functionality (by running parts of the test suite). +These jobs do not produce any artifacts. + + +For the artifact pipeline there are tag builds, which are triggered by pushing a +tag to the git repository. +These tag builds will generate artifacts which are used by the same pipeline +to create NuGet and static library packages, which are then uploaded to +SemaphoreCI's project artifact store. + +Once a tag build pipeline is done, you can download the relevant packages +from the Semaphore CI project artifact store. + +The NuGet package, `librdkafka.redist.<version>.nupkg`, needs to be +manually uploaded to NuGet. + +The `librdkafka-static-bundle-<version>.tgz` static library bundle +needs to be manually imported into the confluent-kafka-go client using the +import script that resides in the Go client repository. + + +**Note**: You will need a NuGet API key to upload nuget packages. + + +See [nuget/nugetpackage.py] and [nuget/staticpackage.py] to see how +packages are assembled from build artifacts. + diff -Nru librdkafka-1.9.2/packaging/tools/build-release-artifacts.sh librdkafka-2.0.2/packaging/tools/build-release-artifacts.sh --- librdkafka-1.9.2/packaging/tools/build-release-artifacts.sh 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/packaging/tools/build-release-artifacts.sh 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,138 @@ +#!/bin/sh +# +# ^ NOTE: This needs to be sh, not bash, for alpine compatibility. +# +# +# Build dynamic and statically linked librdkafka libraries useful for +# release artifacts in high-level clients. +# +# Requires docker.
+# Supported docker images: +# alpine:3.16 +# quay.io/pypa/manylinux2014_aarch64 (centos7) +# quay.io/pypa/manylinux2014_x86_64 (centos7) +# quay.io/pypa/manylinux2010_x86_64 (centos6) +# +# Usage: +# packaging/tools/build-release-artifacts.sh [--disable-gssapi] +# +# The output path must be a relative path and inside the librdkafka directory +# structure. +# + +set -e + +docker_image="" +extra_pkgs_rpm="" +extra_pkgs_apk="" +extra_config_args="" +expected_features="gzip snappy ssl sasl regex lz4 sasl_plain sasl_scram plugins zstd sasl_oauthbearer http oidc" + +# Since cyrus-sasl is the only non-statically-linkable dependency, +# we provide a --disable-gssapi option so that two different libraries +# can be built: one with GSSAPI/Kerberos support, and one without, depending +# on this option. +if [ "$1" = "--disable-gssapi" ]; then + extra_config_args="${extra_config_args} --disable-gssapi" + disable_gssapi="$1" + shift +else + extra_pkgs_rpm="${extra_pkgs_rpm} cyrus-sasl cyrus-sasl-devel" + extra_pkgs_apk="${extra_pkgs_apk} cyrus-sasl cyrus-sasl-dev" + expected_features="${expected_features} sasl_gssapi" + disable_gssapi="" +fi + +# Check if we're running on the host or the (docker) build target. +if [ "$1" = "--in-docker" -a $# -eq 2 ]; then + output="$2" +elif [ $# -eq 2 ]; then + docker_image="$1" + output="$2" +else + echo "Usage: $0 [--disable-gssapi] " + exit 1 +fi + +if [ -n "$docker_image" ]; then + # Running on the host, spin up the docker builder. + exec docker run -v "$PWD:/v" $docker_image /v/packaging/tools/build-release-artifacts.sh $disable_gssapi --in-docker "/v/$output" + # Only reached on exec error + exit $? +fi + + +######################################################################## +# Running in the docker instance, this is where we perform the build. # +######################################################################## + + +# Packages required for building librdkafka (perl is for openssl). + +if grep -q alpine /etc/os-release 2>/dev/null ; then + # Alpine + apk add \ + bash curl gcc g++ make musl-dev linux-headers bsd-compat-headers git \ + python3 perl patch $extra_pkgs_apk + +else + # CentOS + yum install -y libstdc++-devel gcc gcc-c++ python3 git perl-IPC-Cmd $extra_pkgs_rpm +fi + + +# Clone the repo so other builds are unaffected of what we're doing +# and we get a pristine build tree. +git clone /v /librdkafka + +cd /librdkafka + +# Build librdkafka +./configure \ + --install-deps --source-deps-only --disable-lz4-ext \ + --enable-static --enable-strip $extra_config_args + +make -j + +# Show library linkage (for troubleshooting) and checksums (for verification) +for lib in src/librdkafka.so.1 src-cpp/librdkafka++.so.1; do + echo "$0: LINKAGE ${lib}:" + ldd src/librdkafka.so.1 + echo "$0: SHA256 ${lib}:" + sha256sum "$lib" +done + +# Verify that expected features are indeed built. +features=$(examples/rdkafka_example -X builtin.features) +echo "$0: FEATURES: $features" + +missing="" +for f in $expected_features; do + if ! echo "$features" | grep -q "$f" ; then + echo "$0: BUILD IS MISSING FEATURE $f" + missing="${missing} $f" + fi +done + +if [ -n "$missing" ]; then + exit 1 +fi + + +# Run quick test suite, mark it as CI to avoid time/resource sensitive +# tests to fail in case the worker is under-powered. +CI=true make -C tests run_local_quick + + +# Install librdkafka and then make a tar ball of the installed files. +mkdir -p /destdir + +DESTDIR=/destdir make install + +cd /destdir +tar cvzf "$output" . 
+ +# Emit output hash so that build logs can be used to verify artifacts later. +echo "$0: SHA256 $output:" +sha256sum "$output" + diff -Nru librdkafka-1.9.2/packaging/tools/style-format.sh librdkafka-2.0.2/packaging/tools/style-format.sh --- librdkafka-1.9.2/packaging/tools/style-format.sh 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/packaging/tools/style-format.sh 2023-01-20 09:14:36.000000000 +0000 @@ -3,6 +3,11 @@ # Check or apply/fix the project coding style to all files passed as arguments. # Uses clang-format for C/C++ and flake8 for Python. # +# Requires clang-format version 10 (apt install clang-format-10). +# + + +CLANG_FORMAT=${CLANG_FORMAT:-clang-format} set -e @@ -21,6 +26,12 @@ fix=0 fi +clang_format_version=$(${CLANG_FORMAT} --version | sed -Ee 's/.*version ([[:digit:]]+)\.[[:digit:]]+\.[[:digit:]]+.*/\1/') +if [[ $clang_format_version != "10" ]] ; then + echo "$0: clang-format version 10, '$clang_format_version' detected" + exit 1 +fi + # Get list of files from .formatignore to ignore formatting for. ignore_files=( $(grep '^[^#]..' .formatignore) ) @@ -73,12 +84,15 @@ check=0 if [[ $fix == 1 ]]; then - # Convert tabs to spaces first. - sed -i -e 's/\t/ /g' "$f" + # Convert tabs to 8 spaces first. + if grep -ql $'\t' "$f"; then + sed -i -e 's/\t/ /g' "$f" + echo "$f: tabs converted to spaces" + fi if [[ $lang == c ]]; then # Run clang-format to reformat the file - clang-format --style="$style" "$f" > _styletmp + ${CLANG_FORMAT} --style="$style" "$f" > _styletmp else # Run autopep8 to reformat the file. @@ -104,7 +118,7 @@ # Check style if [[ $lang == c ]]; then - if ! clang-format --style="$style" --Werror --dry-run "$f" ; then + if ! ${CLANG_FORMAT} --style="$style" --Werror --dry-run "$f" ; then echo "$f: had style errors ($stylename): see clang-format output above" ret=1 fi diff -Nru librdkafka-1.9.2/README.md librdkafka-2.0.2/README.md --- librdkafka-1.9.2/README.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/README.md 2023-01-20 09:14:36.000000000 +0000 @@ -1,7 +1,7 @@ librdkafka - the Apache Kafka C/C++ client library ================================================== -Copyright (c) 2012-2020, [Magnus Edenhill](http://www.edenhill.se/). +Copyright (c) 2012-2022, [Magnus Edenhill](http://www.edenhill.se/). 
[https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka) @@ -108,6 +108,7 @@ libssl-dev (optional, for SSL and SASL SCRAM support) libsasl2-dev (optional, for SASL GSSAPI support) libzstd-dev (optional, for ZStd compression support) + libcurl-dev (optional, for SASL OAUTHBEARER OIDC support) **NOTE**: Static linking of ZStd (requires zstd >= 1.2.1) in the producer enables encoding the original size in the compression frame header, @@ -177,6 +178,7 @@ * Erlang: [erlkaf](https://github.com/silviucpp/erlkaf) * Go: [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go) * Haskell (kafka, conduit, avro, schema registry): [hw-kafka](https://github.com/haskell-works/hw-kafka) + * Kotlin Native: [Kafka-Kotlin-Native](https://github.com/icemachined/kafka-kotlin-native) * Lua: [luardkafka](https://github.com/mistsv/luardkafka) * Node.js: [node-rdkafka](https://github.com/Blizzard/node-rdkafka) * OCaml: [ocaml-kafka](https://github.com/didier-wenzek/ocaml-kafka) diff -Nru librdkafka-1.9.2/.semaphore/semaphore.yml librdkafka-2.0.2/.semaphore/semaphore.yml --- librdkafka-1.9.2/.semaphore/semaphore.yml 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/.semaphore/semaphore.yml 2023-01-20 09:14:36.000000000 +0000 @@ -1,29 +1,368 @@ version: v1.0 -name: M1 Pipeline +name: 'librdkafka build and release artifact pipeline' agent: machine: - type: s1-prod-mac-m1 + type: s1-prod-macos-arm64 +global_job_config: + prologue: + commands: + - checkout + - mkdir artifacts + - mkdir dest blocks: - - name: 'Build, Test, Package' + - name: 'OSX arm64/m1' + dependencies: [] task: + agent: + machine: + type: s1-prod-macos-arm64 + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-osx__arch-arm64__lnk-all + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' jobs: - name: 'Build' - env_vars: - - name: CC - value: gcc commands: - - cd $SEM_WORKSPACE - - checkout - - export WORKSPACE=$SEM_WORKSPACE/librdkafka - - cd $WORKSPACE - - mkdir dest artifacts - - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --prefix="$WORKSPACE/dest" --enable-strip - - make -j2 all examples check - - make -j2 -C tests build + - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip + - make -j all examples check + - examples/rdkafka_example -X builtin.features + - otool -L src/librdkafka.dylib + - otool -L src-cpp/librdkafka++.dylib + - make -j -C tests build + - make -C tests run_local_quick + - DESTDIR="$PWD/dest" make install + - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .) + + + - name: 'OSX x64' + dependencies: [] + task: + agent: + machine: + type: s1-prod-macos + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-osx__arch-x64__lnk-all + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' + jobs: + - name: 'Build' + commands: + - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip + - make -j all examples check + - examples/rdkafka_example -X builtin.features + - otool -L src/librdkafka.dylib + - otool -L src-cpp/librdkafka++.dylib + - make -j -C tests build + - make -C tests run_local_quick + - DESTDIR="$PWD/dest" make install + - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .) 
+ + + - name: 'Style check' + dependencies: [] + skip: + # Skip for release tags, we don't want style checks + # to fail the release build. + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Style check' + commands: + - sudo apt install -y clang-format-10 python3 python3-pip python3-setuptools + - python3 -m pip install -r packaging/tools/requirements.txt + - CLANG_FORMAT=clang-format-10 make style-check + + + - name: 'Build documentation' + dependencies: [] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Generate documentation' + commands: + - sudo apt install -y doxygen graphviz + - make docs + - (cd staging-docs && tar cvzf ../artifacts/librdkafka-docs.tgz .) + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/librdkafka-docs.tgz --destination artifacts/librdkafka-docs.tgz' + + + - name: 'Linux Ubuntu x64: source build' + dependencies: [] + skip: + # Skip for release tags, we don't want flaky CI tests + # to fail the release build. + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Build and integration tests' + commands: + - wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb + - sudo dpkg -i rapidjson-dev.deb + - python3 -m pip install -U pip + - python3 -m pip -V + - python3 -m pip install -r tests/requirements.txt + - ./configure --install-deps + # split these up + - ./packaging/tools/rdutcoverage.sh + - make copyright-check + - make -j all examples check + - echo "Verifying that CONFIGURATION.md does not have manual changes" + - git diff --exit-code CONFIGURATION.md + - examples/rdkafka_example -X builtin.features + - ldd src/librdkafka.so.1 + - ldd src-cpp/librdkafka++.so.1 + - make -j -C tests build - make -C tests run_local_quick - - make install - - cd $WORKSPACE/dest - - tar cvzf ${WORKSPACE}/artifacts/librdkafka-${CC}.tar.gz . 
- - artifact push job ${WORKSPACE}/artifacts/librdkafka-${CC}.tar.gz - - cd $WORKSPACE - - sha256sum artifacts/* + - DESTDIR="$PWD/dest" make install + - (cd tests && python3 -m trivup.clusters.KafkaCluster --version 3.1.0 --cmd 'make quick') + + + - name: 'Linux x64: release artifact docker builds' + dependencies: [] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' + jobs: + - name: 'Build: centos6 glibc +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos6__arch-x64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2010_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: centos6 glibc' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos6__arch-x64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2010_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: centos7 glibc +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos7__arch-x64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2014_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: centos7 glibc' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos7__arch-x64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2014_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh alpine:3.16 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16 artifacts/librdkafka.tgz + + + - name: 'Linux arm64: release artifact docker builds' + dependencies: [] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-arm64-1 + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' + jobs: + - name: 'Build: centos7 glibc +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2014_aarch64 artifacts/librdkafka.tgz + + - name: 'Build: centos7 glibc' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2014_aarch64 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh alpine:3.16 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16 artifacts/librdkafka.tgz + + + - name: 'Windows x64: MinGW-w64' + 
dependencies: [] + task: + agent: + machine: + type: s1-prod-windows + env_vars: + - name: CHERE_INVOKING + value: 'yes' + - name: MSYSTEM + value: UCRT64 + prologue: + commands: + - cache restore msys2-x64-${Env:ARTIFACT_KEY} + # Set up msys2 + - "& .\\win32\\setup-msys2.ps1" + - cache delete msys2-x64-${Env:ARTIFACT_KEY} + - cache store msys2-x64-${Env:ARTIFACT_KEY} c:/msys64 + epilogue: + commands: + - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ } + jobs: + - name: 'Build: MinGW-w64 Dynamic' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-std + commands: + - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh ./artifacts/librdkafka.tgz' + + - name: 'Build: MinGW-w64 Static' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-static + commands: + - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh --static ./artifacts/librdkafka.tgz' + + + - name: 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019' + dependencies: [] + task: + agent: + machine: + type: s1-prod-windows + env_vars: + # Disable vcpkg telemetry + - name: VCPKG_DISABLE_METRICS + value: 'yes' + prologue: + commands: + # install vcpkg in the parent directory. + - pwd + - cd .. + # Restore vcpkg caches, if any. + - cache restore vcpkg-archives-$Env:ARTIFACT_KEY + # Setup vcpkg + - "& .\\librdkafka\\win32\\setup-vcpkg.ps1" + - cd librdkafka + - ..\vcpkg\vcpkg integrate install + # Install required packages. + - ..\vcpkg\vcpkg --feature-flags=versions install --triplet $Env:triplet + - cd .. + - pwd + # Store vcpkg caches + - ls vcpkg/ + - echo $Env:VCPKG_ROOT + - cache delete vcpkg-archives-$Env:ARTIFACT_KEY + - cache store vcpkg-archives-$Env:ARTIFACT_KEY C:/Users/semaphore/AppData/Local/vcpkg/archives + - pwd + - cd librdkafka + # coapp is needed for creating the intermediary nuget packages. + - "& .\\win32\\install-coapp.ps1" + epilogue: + commands: + - Get-ChildItem . -include *.dll -recurse + - Get-ChildItem . -include *.lib -recurse + - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ } + jobs: + - name: 'Build: MSVC x64' + env_vars: + - name: triplet + value: x64-windows + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-msvc__arch-x64__lnk-std + commands: + - "& .\\win32\\msbuild.ps1 -config Release -platform x64" + - "& .\\win32\\package-nuget.ps1 -destdir .\\artifacts\\" + + - name: 'Build: MSVC x86' + env_vars: + - name: triplet + value: x86-windows + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-msvc__arch-x86__lnk-std + commands: + - "& .\\win32\\msbuild.ps1 -config Release -platform Win32" + - "& .\\win32\\package-nuget.ps1 -destdir .\\artifacts\\" + + - name: 'Packaging' + dependencies: + - 'Build documentation' + - 'OSX arm64/m1' + - 'OSX x64' + - 'Linux x64: release artifact docker builds' + - 'Linux arm64: release artifact docker builds' + - 'Windows x64: MinGW-w64' + - 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019' + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Build NuGet and static packages' + commands: + # Get all artifacts from previous jobs in this workflow/pipeline. 
+ - artifact pull workflow artifacts + - mkdir -p packages + # Prepare packaging tools + - cd packaging/nuget + - python3 -m pip install -U -r requirements.txt + # Create NuGet package + # We need --ignore-tag since the jobs don't add the tag to + # the artifact path, and they don't need to since these artifacts + # are part of the same workflow. + - ./release.py --directory ../../artifacts --ignore-tag --class NugetPackage ${SEMAPHORE_GIT_TAG_NAME} + - cp -v librdkafka.redist.*.nupkg ../../packages + # Create static package + - ./release.py --directory ../../artifacts --ignore-tag --class StaticPackage ${SEMAPHORE_GIT_TAG_NAME} + - cp -v librdkafka-static-bundle*.tgz ../../packages + - cd ../../ + # Copy generated docs to packages for inclusion in the tar ball + - cp -v artifacts/librdkafka-docs.tgz packages/ + # Maker super tar ball of all packages + - cd packages + - tar cvf librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tar . + # Provide some extra details + - ls -la + - sha256sum * + - cd .. + # Upload all packages to project artifact store + - artifact push project packages --destination librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID} + - echo Thank you diff -Nru librdkafka-1.9.2/service.yml librdkafka-2.0.2/service.yml --- librdkafka-1.9.2/service.yml 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/service.yml 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,18 @@ +name: librdkafka +lang: unknown +lang_version: unknown +git: + enable: true +github: + enable: true +semaphore: + enable: true + pipeline_enable: false + triggers: + - tags + - branches + branches: + - master + - /semaphore.*/ + - /dev_.*/ + - /feature\/.*/ diff -Nru librdkafka-1.9.2/src/CMakeLists.txt librdkafka-2.0.2/src/CMakeLists.txt --- librdkafka-1.9.2/src/CMakeLists.txt 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/CMakeLists.txt 2023-01-20 09:14:36.000000000 +0000 @@ -51,6 +51,7 @@ rdkafka_mock_handlers.c rdkafka_mock_cgrp.c rdkafka_error.c + rdkafka_fetcher.c rdlist.c rdlog.c rdmurmur2.c @@ -196,6 +197,12 @@ file(MAKE_DIRECTORY "${dummy}") target_include_directories(rdkafka PUBLIC "$") +if(WITH_CURL) + find_package(CURL REQUIRED) + target_include_directories(rdkafka PUBLIC ${CURL_INCLUDE_DIRS}) + target_link_libraries(rdkafka PUBLIC ${CURL_LIBRARIES}) +endif() + if(WITH_HDRHISTOGRAM) target_link_libraries(rdkafka PUBLIC m) endif() @@ -287,6 +294,11 @@ if(NOT RDKAFKA_BUILD_STATIC) set(PKG_CONFIG_NAME "librdkafka") set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library") + + if(WITH_CURL) + string(APPEND PKG_CONFIG_REQUIRES "curl ") + endif() + if(WITH_ZLIB) string(APPEND PKG_CONFIG_REQUIRES "zlib ") endif() diff -Nru librdkafka-1.9.2/src/Makefile librdkafka-2.0.2/src/Makefile --- librdkafka-1.9.2/src/Makefile 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/Makefile 2023-01-20 09:14:36.000000000 +0000 @@ -55,7 +55,7 @@ rdkafka_txnmgr.c rdkafka_coord.c \ rdvarint.c rdbuf.c rdmap.c rdunittest.c \ rdkafka_mock.c rdkafka_mock_handlers.c rdkafka_mock_cgrp.c \ - rdkafka_error.c \ + rdkafka_error.c rdkafka_fetcher.c \ $(SRCS_y) HDRS= rdkafka.h rdkafka_mock.h diff -Nru librdkafka-1.9.2/src/rdaddr.c librdkafka-2.0.2/src/rdaddr.c --- librdkafka-1.9.2/src/rdaddr.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdaddr.c 2023-01-20 09:14:36.000000000 +0000 @@ -154,13 +154,20 @@ -rd_sockaddr_list_t *rd_getaddrinfo(const char *nodesvc, - const char *defsvc, - int flags, - int family, - int socktype, - int protocol, - const char 
**errstr) { +rd_sockaddr_list_t * +rd_getaddrinfo(const char *nodesvc, + const char *defsvc, + int flags, + int family, + int socktype, + int protocol, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque), + void *opaque, + const char **errstr) { struct addrinfo hints; memset(&hints, 0, sizeof(hints)); hints.ai_family = family; @@ -182,7 +189,13 @@ if (*svc) defsvc = svc; - if ((r = getaddrinfo(node, defsvc, &hints, &ais))) { + if (resolve_cb) { + r = resolve_cb(node, defsvc, &hints, &ais, opaque); + } else { + r = getaddrinfo(node, defsvc, &hints, &ais); + } + + if (r) { #ifdef EAI_SYSTEM if (r == EAI_SYSTEM) #else @@ -206,7 +219,10 @@ if (cnt == 0) { /* unlikely? */ - freeaddrinfo(ais); + if (resolve_cb) + resolve_cb(NULL, NULL, NULL, &ais, opaque); + else + freeaddrinfo(ais); errno = ENOENT; *errstr = "No addresses"; return NULL; @@ -219,7 +235,10 @@ memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], ai->ai_addr, ai->ai_addrlen); - freeaddrinfo(ais); + if (resolve_cb) + resolve_cb(NULL, NULL, NULL, &ais, opaque); + else + freeaddrinfo(ais); /* Shuffle address list for proper round-robin */ if (!(flags & RD_AI_NOSHUFFLE)) diff -Nru librdkafka-1.9.2/src/rdaddr.h librdkafka-2.0.2/src/rdaddr.h --- librdkafka-1.9.2/src/rdaddr.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdaddr.h 2023-01-20 09:14:36.000000000 +0000 @@ -157,13 +157,22 @@ * FIXME: Guessing non-used bits like this \ * is a bad idea. */ -rd_sockaddr_list_t *rd_getaddrinfo(const char *nodesvc, - const char *defsvc, - int flags, - int family, - int socktype, - int protocol, - const char **errstr); +struct addrinfo; + +rd_sockaddr_list_t * +rd_getaddrinfo(const char *nodesvc, + const char *defsvc, + int flags, + int family, + int socktype, + int protocol, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque), + void *opaque, + const char **errstr); diff -Nru librdkafka-1.9.2/src/rd.h librdkafka-2.0.2/src/rd.h --- librdkafka-1.9.2/src/rd.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rd.h 2023-01-20 09:14:36.000000000 +0000 @@ -98,6 +98,14 @@ } while (0) #endif +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) +/** Function attribute to indicate that a sentinel NULL is required at the + * end of the va-arg input list. 
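For illustration only (not part of the patch): a minimal sketch of a resolver callback compatible with the new `resolve_cb` parameter added to `rd_getaddrinfo()` in the hunks above. The signature is taken from the diff; the all-NULL call is the free convention visible there, where `resolve_cb(NULL, NULL, NULL, &ais, opaque)` replaces `freeaddrinfo()`. The function name is hypothetical.

```c
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <stddef.h>

/* Hypothetical resolver callback for the new resolve_cb parameter.
 * A non-NULL node/service/hints triple asks for resolution; all-NULL
 * arguments ask the callback to free the addrinfo list it returned
 * earlier (mirroring the freeaddrinfo() replacement above). */
static int my_resolve_cb(const char *node,
                         const char *service,
                         const struct addrinfo *hints,
                         struct addrinfo **res,
                         void *opaque) {
        (void)opaque;

        if (!node && !service && !hints) {
                /* Free request: release the previously returned result. */
                freeaddrinfo(*res);
                return 0;
        }

        /* Resolution request: delegate to the system resolver; a real
         * callback might consult a cache or a custom DNS client here. */
        return getaddrinfo(node, service, hints, res);
}
```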
*/ +#define RD_SENTINEL __attribute__((__sentinel__)) +#else +#define RD_SENTINEL +#endif + /** Assert if reached */ #define RD_NOTREACHED() rd_assert(!*"/* NOTREACHED */ violated") diff -Nru librdkafka-1.9.2/src/rdhttp.c librdkafka-2.0.2/src/rdhttp.c --- librdkafka-1.9.2/src/rdhttp.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdhttp.c 2023-01-20 09:14:36.000000000 +0000 @@ -107,7 +107,7 @@ void rd_http_req_destroy(rd_http_req_t *hreq) { RD_IF_FREE(hreq->hreq_curl, curl_easy_cleanup); - RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy); + RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy_free); } diff -Nru librdkafka-1.9.2/src/rdkafka_admin.c librdkafka-2.0.2/src/rdkafka_admin.c --- librdkafka-1.9.2/src/rdkafka_admin.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_admin.c 2023-01-20 09:14:36.000000000 +0000 @@ -37,10 +37,13 @@ /** @brief Descriptive strings for rko_u.admin_request.state */ static const char *rd_kafka_admin_state_desc[] = { - "initializing", "waiting for broker", - "waiting for controller", "waiting for fanouts", - "constructing request", "waiting for response from broker", -}; + "initializing", + "waiting for broker", + "waiting for controller", + "waiting for fanouts", + "constructing request", + "waiting for response from broker", + "waiting for a valid list of brokers to be available"}; @@ -101,7 +104,7 @@ * 6. [rdkafka main thread] The worker callback is called. * After some initial checking of err==ERR__DESTROY events * (which is used to clean up outstanding ops (etc) on termination), - * the code hits a state machine using rko_u.admin.request_state. + * the code hits a state machine using rko_u.admin_request.state. * * 7. [rdkafka main thread] The initial state is RD_KAFKA_ADMIN_STATE_INIT * where the worker validates the user input. @@ -231,6 +234,7 @@ RD_KAFKA_ADMIN_TARGET_COORDINATOR = -2, /**< (Group) Coordinator */ RD_KAFKA_ADMIN_TARGET_FANOUT = -3, /**< This rko is a fanout and * and has no target broker */ + RD_KAFKA_ADMIN_TARGET_ALL = -4, /**< All available brokers */ }; /** @@ -259,6 +263,8 @@ typedef rd_list_copy_cb_t rd_kafka_admin_fanout_CopyResult_cb_t; +typedef rd_list_copy_cb_t rd_kafka_admin_fanout_CopyArg_cb_t; + /** * @struct Request-specific worker callbacks. */ @@ -281,6 +287,9 @@ /** Copy an accumulated result for storing into the rko_result. */ rd_kafka_admin_fanout_CopyResult_cb_t *copy_result; + + /** Copy the original arguments, used by target ALL. 
*/ + rd_kafka_admin_fanout_CopyArg_cb_t *copy_arg; }; /* Forward declarations */ @@ -289,6 +298,10 @@ rd_bool_t do_destroy); static void rd_kafka_AdminOptions_init(rd_kafka_t *rk, rd_kafka_AdminOptions_t *options); + +static void rd_kafka_AdminOptions_copy_to(rd_kafka_AdminOptions_t *dst, + const rd_kafka_AdminOptions_t *src); + static rd_kafka_op_res_t rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko); static rd_kafka_ConfigEntry_t * @@ -558,7 +571,9 @@ rd_kafka_op_type_t reqtype = rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; rd_assert(reqtype == RD_KAFKA_OP_DELETEGROUPS || - reqtype == RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS); + reqtype == RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS || + reqtype == RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS || + reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS); *cntp = rd_list_cnt(&rko->rko_u.admin_result.results); return (const rd_kafka_group_result_t **) @@ -614,7 +629,8 @@ /* Make a copy of the options */ if (options) - rko->rko_u.admin_request.options = *options; + rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options, + options); else rd_kafka_AdminOptions_init(rk, &rko->rko_u.admin_request.options); @@ -800,6 +816,57 @@ } +/** + * @brief Asynchronously look up current list of broker ids until available. + * Bootstrap and logical brokers are excluded from the list. + * + * To be called repeatedly from each invocation of the worker + * when in state RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST until + * a not-NULL rd_list_t * is returned. + * + * @param rk Client instance. + * @param rko Op containing the admin request eonce to use for the + * async callback. + * @return List of int32_t with broker nodeids when ready, NULL when + * the eonce callback will be called. + */ +static rd_list_t * +rd_kafka_admin_common_brokers_get_nodeids(rd_kafka_t *rk, rd_kafka_op_t *rko) { + rd_list_t *broker_ids; + + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up brokers", + rd_kafka_op2str(rko->rko_type)); + + /* Since we're iterating over this rd_kafka_brokers_get_nodeids_async() + * call (asynchronously) until a nodeids list is available (or timeout), + * we need to re-enable the eonce to be triggered again (which + * is not necessary the first time we get here, but there + * is no harm doing it then either). */ + rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko, + RD_KAFKA_REPLYQ(rk->rk_ops, 0)); + + /* Look up the nodeids list asynchronously, if it's + * not available the eonce is registered for broker + * state changes which will cause our function to be called + * again as soon as (any) broker state changes. + * When we are called again we perform the same lookup + * again and hopefully get a list of nodeids again, + * otherwise defer a new async wait. + * Repeat until success or timeout. */ + if (!(broker_ids = rd_kafka_brokers_get_nodeids_async( + rk, rko->rko_u.admin_request.eonce))) { + /* nodeids list not available, wait asynchronously + * for the eonce to be triggered. */ + return NULL; + } + + rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: %d broker(s)", + rd_kafka_op2str(rko->rko_type), rd_list_cnt(broker_ids)); + + return broker_ids; +} + + /** * @brief Handle response from broker by triggering worker callback. 
@@ -924,6 +991,9 @@ rd_kafka_admin_result_enq(rko, rko_result); } +static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_list_t *nodeids); /** @@ -953,6 +1023,7 @@ rd_ts_t timeout_in; rd_kafka_broker_t *rkb = NULL; rd_kafka_resp_err_t err; + rd_list_t *nodeids = NULL; char errstr[512]; /* ADMIN_FANOUT handled by fanout_worker() */ @@ -1050,17 +1121,22 @@ rd_kafka_enq_once_add_source( rko->rko_u.admin_request.eonce, "coordinator request"); - rd_kafka_coord_req(rk, - rko->rko_u.admin_request.coordtype, - rko->rko_u.admin_request.coordkey, - rd_kafka_admin_coord_request, NULL, - rd_kafka_admin_timeout_remains(rko), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), - rd_kafka_admin_coord_response_parse, - rko->rko_u.admin_request.eonce); + rd_kafka_coord_req( + rk, rko->rko_u.admin_request.coordtype, + rko->rko_u.admin_request.coordkey, + rd_kafka_admin_coord_request, NULL, 0 /* no delay*/, + rd_kafka_admin_timeout_remains(rko), + RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_admin_coord_response_parse, + rko->rko_u.admin_request.eonce); /* Wait asynchronously for broker response, which will * trigger the eonce and worker to be called again. */ return RD_KAFKA_OP_RES_KEEP; + case RD_KAFKA_ADMIN_TARGET_ALL: + /* All brokers */ + rko->rko_u.admin_request.state = + RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST; + goto redo; /* Trigger next state immediately */ case RD_KAFKA_ADMIN_TARGET_FANOUT: /* Shouldn't come here, fanouts are handled by @@ -1100,11 +1176,24 @@ RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST; goto redo; + case RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST: + /* Wait for a valid list of brokers to be available. */ + if (!(nodeids = + rd_kafka_admin_common_brokers_get_nodeids(rk, rko))) { + /* Still waiting for brokers to become available. */ + return RD_KAFKA_OP_RES_KEEP; + } + + rd_kafka_admin_fanout_op_distribute(rk, rko, nodeids); + rd_list_destroy(nodeids); + rko->rko_u.admin_request.state = + RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS; + goto redo; + case RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS: - /* This state is only used by ADMIN_FANOUT which has - * its own fanout_worker() */ - RD_NOTREACHED(); - break; + /* This op can be destroyed, as a new fanout op has been + * sent, and the response will be enqueued there. */ + goto destroy; case RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST: /* Got broker, send protocol request. */ @@ -1158,7 +1247,6 @@ return RD_KAFKA_OP_RES_HANDLED; /* trigger's op_destroy() */ } - /** * @brief Create a new admin_fanout op of type \p req_type and sets up the * generic (type independent files). @@ -1198,7 +1286,8 @@ /* Make a copy of the options */ if (options) - rko->rko_u.admin_request.options = *options; + rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options, + options); else rd_kafka_AdminOptions_init(rk, &rko->rko_u.admin_request.options); @@ -1220,6 +1309,54 @@ return rko; } +/** + * @brief Duplicate the fanout operation for each nodeid passed and + * enqueue each new operation. Use the same fanout_parent as + * the passed \p rko. + * + * @param rk Client instance. + * @param rko Operation to distribute to each broker. + * @param nodeids List of int32_t with the broker nodeids. 
+ * @param rkq + * @return rd_kafka_op_t* + */ +static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_list_t *nodeids) { + int i, nodeids_cnt, timeout_remains; + rd_kafka_op_t *rko_fanout; + rd_kafka_AdminOptions_t *options = &rko->rko_u.admin_request.options; + timeout_remains = rd_kafka_admin_timeout_remains(rko); + rd_kafka_AdminOptions_set_request_timeout(options, timeout_remains, + NULL, 0); + + nodeids_cnt = rd_list_cnt(nodeids); + rko_fanout = rko->rko_u.admin_request.fanout_parent; + rko_fanout->rko_u.admin_request.fanout.outstanding = (int)nodeids_cnt; + rko->rko_u.admin_request.fanout_parent = NULL; + + /* Create individual request ops for each node */ + for (i = 0; i < nodeids_cnt; i++) { + rd_kafka_op_t *rko_dup = rd_kafka_admin_request_op_new( + rk, rko->rko_type, + rko->rko_u.admin_request.reply_event_type, + rko->rko_u.admin_request.cbs, options, rk->rk_ops); + + rko_dup->rko_u.admin_request.fanout_parent = rko_fanout; + rko_dup->rko_u.admin_request.broker_id = + rd_list_get_int32(nodeids, i); + + rd_list_init_copy(&rko_dup->rko_u.admin_request.args, + &rko->rko_u.admin_request.args); + rd_list_copy_to( + &rko_dup->rko_u.admin_request.args, + &rko->rko_u.admin_request.args, + rko_fanout->rko_u.admin_request.fanout.cbs->copy_arg, NULL); + + rd_kafka_q_enq(rk->rk_ops, rko_dup); + } +} + /** * @brief Common fanout worker state machine handling regardless of request type @@ -1299,6 +1436,50 @@ return RD_KAFKA_OP_RES_HANDLED; /* trigger's op_destroy(rko) */ } +/** + * @brief Create a new operation that targets all the brokers. + * The operation consists of a fanout parent that is reused and + * fanout operation that is duplicated for each broker found. + * + * @param rk Client instance- + * @param optype Operation type. + * @param reply_event_type Reply event type. + * @param cbs Fanned out op callbacks. + * @param fanout_cbs Fanout parent out op callbacks. + * @param result_free Callback for freeing the result list. + * @param options Operation options. + * @param rkq Result queue. + * @return The newly created op targeting all the brokers. + * + * @sa Use rd_kafka_op_destroy() to release it. + */ +static rd_kafka_op_t *rd_kafka_admin_request_op_target_all_new( + rd_kafka_t *rk, + rd_kafka_op_type_t optype, + rd_kafka_event_type_t reply_event_type, + const struct rd_kafka_admin_worker_cbs *cbs, + const struct rd_kafka_admin_fanout_worker_cbs *fanout_cbs, + void (*result_free)(void *), + const rd_kafka_AdminOptions_t *options, + rd_kafka_q_t *rkq) { + rd_kafka_op_t *rko, *rko_fanout; + + rko_fanout = rd_kafka_admin_fanout_op_new(rk, optype, reply_event_type, + fanout_cbs, options, rkq); + + rko = rd_kafka_admin_request_op_new(rk, optype, reply_event_type, cbs, + options, rk->rk_ops); + + rko_fanout->rko_u.admin_request.fanout.outstanding = 1; + rko->rko_u.admin_request.fanout_parent = rko_fanout; + rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_ALL; + + rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, (int)1, + result_free); + + return rko; +} + /**@}*/ @@ -1366,6 +1547,63 @@ &ibroker_id, errstr, errstr_size); } +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets( + rd_kafka_AdminOptions_t *options, + int true_or_false) { + char errstr[512]; + rd_kafka_resp_err_t err = rd_kafka_confval_set_type( + &options->require_stable_offsets, RD_KAFKA_CONFVAL_INT, + &true_or_false, errstr, sizeof(errstr)); + return !err ? 
NULL : rd_kafka_error_new(err, "%s", errstr); +} + +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states( + rd_kafka_AdminOptions_t *options, + const rd_kafka_consumer_group_state_t *consumer_group_states, + size_t consumer_group_states_cnt) { + size_t i; + char errstr[512]; + rd_kafka_resp_err_t err; + rd_list_t *states_list = rd_list_new(0, NULL); + rd_list_init_int32(states_list, consumer_group_states_cnt); + uint64_t states_bitmask = 0; + + if (RD_KAFKA_CONSUMER_GROUP_STATE__CNT >= 64) { + rd_assert("BUG: cannot handle states with a bitmask anymore"); + } + + for (i = 0; i < consumer_group_states_cnt; i++) { + uint64_t state_bit; + rd_kafka_consumer_group_state_t state = + consumer_group_states[i]; + + if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) { + rd_list_destroy(states_list); + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Invalid group state value"); + } + + state_bit = 1 << state; + if (states_bitmask & state_bit) { + rd_list_destroy(states_list); + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate states not allowed"); + } else { + states_bitmask = states_bitmask | state_bit; + rd_list_set_int32(states_list, (int32_t)i, state); + } + } + err = rd_kafka_confval_set_type(&options->match_consumer_group_states, + RD_KAFKA_CONFVAL_PTR, states_list, + errstr, sizeof(errstr)); + if (err) { + rd_list_destroy(states_list); + } + return !err ? NULL : rd_kafka_error_new(err, "%s", errstr); +} + void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *opaque) { rd_kafka_confval_set_type(&options->opaque, RD_KAFKA_CONFVAL_PTR, @@ -1411,10 +1649,48 @@ else rd_kafka_confval_disable(&options->incremental, "incremental"); + if (options->for_api == RD_KAFKA_ADMIN_OP_ANY || + options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS) + rd_kafka_confval_init_int(&options->require_stable_offsets, + "require_stable_offsets", 0, 1, 0); + else + rd_kafka_confval_disable(&options->require_stable_offsets, + "require_stable_offsets"); + + if (options->for_api == RD_KAFKA_ADMIN_OP_ANY || + options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS) + rd_kafka_confval_init_ptr(&options->match_consumer_group_states, + "match_consumer_group_states"); + else + rd_kafka_confval_disable(&options->match_consumer_group_states, + "match_consumer_group_states"); + rd_kafka_confval_init_int(&options->broker, "broker", 0, INT32_MAX, -1); rd_kafka_confval_init_ptr(&options->opaque, "opaque"); } +/** + * @brief Copy contents of \p src to \p dst. + * Deep copy every pointer confval. + * + * @param dst The destination AdminOptions. + * @param src The source AdminOptions. 
+ */ +static void rd_kafka_AdminOptions_copy_to(rd_kafka_AdminOptions_t *dst, + const rd_kafka_AdminOptions_t *src) { + *dst = *src; + if (src->match_consumer_group_states.u.PTR) { + char errstr[512]; + rd_list_t *states_list_copy = rd_list_copy_preallocated( + src->match_consumer_group_states.u.PTR, NULL); + + rd_kafka_resp_err_t err = rd_kafka_confval_set_type( + &dst->match_consumer_group_states, RD_KAFKA_CONFVAL_PTR, + states_list_copy, errstr, sizeof(errstr)); + rd_assert(!err); + } +} + rd_kafka_AdminOptions_t * rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api) { @@ -1433,6 +1709,9 @@ } void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options) { + if (options->match_consumer_group_states.u.PTR) { + rd_list_destroy(options->match_consumer_group_states.u.PTR); + } rd_free(options); } @@ -3971,7 +4250,6 @@ cntp); } -RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets( rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, @@ -4801,3 +5079,1578 @@ } /**@}*/ + +/** + * @name Alter consumer group offsets (committed offsets) + * @{ + * + * + * + * + */ + +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions) { + rd_assert(group_id && partitions); + + size_t tsize = strlen(group_id) + 1; + rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets; + + /* Single allocation */ + alter_grpoffsets = rd_malloc(sizeof(*alter_grpoffsets) + tsize); + alter_grpoffsets->group_id = alter_grpoffsets->data; + memcpy(alter_grpoffsets->group_id, group_id, tsize); + alter_grpoffsets->partitions = + rd_kafka_topic_partition_list_copy(partitions); + + return alter_grpoffsets; +} + +void rd_kafka_AlterConsumerGroupOffsets_destroy( + rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets) { + rd_kafka_topic_partition_list_destroy(alter_grpoffsets->partitions); + rd_free(alter_grpoffsets); +} + +static void rd_kafka_AlterConsumerGroupOffsets_free(void *ptr) { + rd_kafka_AlterConsumerGroupOffsets_destroy(ptr); +} + +void rd_kafka_AlterConsumerGroupOffsets_destroy_array( + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffsets_cnt) { + size_t i; + for (i = 0; i < alter_grpoffsets_cnt; i++) + rd_kafka_AlterConsumerGroupOffsets_destroy(alter_grpoffsets[i]); +} + +/** + * @brief Allocate a new AlterGroup and make a copy of \p src + */ +static rd_kafka_AlterConsumerGroupOffsets_t * +rd_kafka_AlterConsumerGroupOffsets_copy( + const rd_kafka_AlterConsumerGroupOffsets_t *src) { + return rd_kafka_AlterConsumerGroupOffsets_new(src->group_id, + src->partitions); +} + +/** + * @brief Send a OffsetCommitRequest to \p rkb with the partitions + * in alter_grpoffsets (AlterConsumerGroupOffsets_t*) using + * \p options. 
+ * + */ +static rd_kafka_resp_err_t rd_kafka_AlterConsumerGroupOffsetsRequest( + rd_kafka_broker_t *rkb, + /* (rd_kafka_AlterConsumerGroupOffsets_t*) */ + const rd_list_t *alter_grpoffsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + const rd_kafka_AlterConsumerGroupOffsets_t *grpoffsets = + rd_list_elem(alter_grpoffsets, 0); + + rd_assert(rd_list_cnt(alter_grpoffsets) == 1); + + rd_kafka_topic_partition_list_t *offsets = grpoffsets->partitions; + rd_kafka_consumer_group_metadata_t *cgmetadata = + rd_kafka_consumer_group_metadata_new(grpoffsets->group_id); + + int ret = rd_kafka_OffsetCommitRequest( + rkb, cgmetadata, offsets, replyq, resp_cb, opaque, + "rd_kafka_AlterConsumerGroupOffsetsRequest"); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + if (ret == 0) { + rd_snprintf(errstr, errstr_size, + "At least one topic-partition offset must " + "be >= 0"); + return RD_KAFKA_RESP_ERR__NO_OFFSET; + } + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse OffsetCommitResponse and create ADMIN_RESULT op. + */ +static rd_kafka_resp_err_t +rd_kafka_AlterConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + rd_kafka_t *rk; + rd_kafka_broker_t *rkb; + rd_kafka_op_t *rko_result; + rd_kafka_topic_partition_list_t *partitions = NULL; + rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; + const rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets = + rd_list_elem(&rko_req->rko_u.admin_request.args, 0); + partitions = + rd_kafka_topic_partition_list_copy(alter_grpoffsets->partitions); + + rk = rko_req->rko_rk; + rkb = reply->rkbuf_rkb; + err = rd_kafka_handle_OffsetCommit(rk, rkb, err, reply, NULL, + partitions, rd_true); + + /* Create result op and group_result_t */ + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_group_result_free); + rd_list_add(&rko_result->rko_u.admin_result.results, + rd_kafka_group_result_new(alter_grpoffsets->group_id, -1, + partitions, NULL)); + rd_kafka_topic_partition_list_destroy(partitions); + *rko_resultp = rko_result; + + if (reply->rkbuf_err) + rd_snprintf( + errstr, errstr_size, + "AlterConsumerGroupOffset response parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_AlterConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + int i; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_AlterConsumerGroupOffsetsRequest, + rd_kafka_AlterConsumerGroupOffsetsResponse_parse, + }; + rd_kafka_op_t *rko; + rd_kafka_topic_partition_list_t *copied_offsets; + + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS, + RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT, &cbs, options, + rkqu->rkqu_q); + + if (alter_grpoffsets_cnt != 1) { + /* For simplicity we only support one single group for now */ + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Exactly one " + "AlterConsumerGroupOffsets must " + "be passed"); + goto fail; + } + + if (alter_grpoffsets[0]->partitions->cnt == 0) { + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Non-empty topic partition list " + "must be present"); + 
goto fail; + } + + for (i = 0; i < alter_grpoffsets[0]->partitions->cnt; i++) { + if (alter_grpoffsets[0]->partitions->elems[i].offset < 0) { + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "All topic-partition offsets " + "must be >= 0"); + goto fail; + } + } + + /* TODO: add group id duplication check if in future more than one + * AlterConsumerGroupOffsets can be passed */ + + /* Copy offsets list for checking duplicated */ + copied_offsets = + rd_kafka_topic_partition_list_copy(alter_grpoffsets[0]->partitions); + if (rd_kafka_topic_partition_list_has_duplicates( + copied_offsets, rd_false /*check partition*/)) { + rd_kafka_topic_partition_list_destroy(copied_offsets); + rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate partitions not allowed"); + goto fail; + } + rd_kafka_topic_partition_list_destroy(copied_offsets); + + rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR; + rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; + rko->rko_u.admin_request.coordkey = + rd_strdup(alter_grpoffsets[0]->group_id); + + /* Store copy of group on request so the group name can be reached + * from the response parser. */ + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_AlterConsumerGroupOffsets_free); + rd_list_add(&rko->rko_u.admin_request.args, + (void *)rd_kafka_AlterConsumerGroupOffsets_copy( + alter_grpoffsets[0])); + + rd_kafka_q_enq(rk->rk_ops, rko); + return; +fail: + rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/); +} + + +/** + * @brief Get an array of group results from a AlterGroups result. + * + * The returned \p groups life-time is the same as the \p result object. + * @param cntp is updated to the number of elements in the array. + */ +const rd_kafka_group_result_t ** +rd_kafka_AlterConsumerGroupOffsets_result_groups( + const rd_kafka_AlterConsumerGroupOffsets_result_t *result, + size_t *cntp) { + return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result, + cntp); +} + +/**@}*/ + + +/**@}*/ + +/** + * @name List consumer group offsets (committed offsets) + * @{ + * + * + * + * + */ + +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions) { + size_t tsize = strlen(group_id) + 1; + rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets; + + rd_assert(group_id); + + /* Single allocation */ + list_grpoffsets = rd_calloc(1, sizeof(*list_grpoffsets) + tsize); + list_grpoffsets->group_id = list_grpoffsets->data; + memcpy(list_grpoffsets->group_id, group_id, tsize); + if (partitions) { + list_grpoffsets->partitions = + rd_kafka_topic_partition_list_copy(partitions); + } + + return list_grpoffsets; +} + +void rd_kafka_ListConsumerGroupOffsets_destroy( + rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets) { + if (list_grpoffsets->partitions != NULL) { + rd_kafka_topic_partition_list_destroy( + list_grpoffsets->partitions); + } + rd_free(list_grpoffsets); +} + +static void rd_kafka_ListConsumerGroupOffsets_free(void *ptr) { + rd_kafka_ListConsumerGroupOffsets_destroy(ptr); +} + +void rd_kafka_ListConsumerGroupOffsets_destroy_array( + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffsets_cnt) { + size_t i; + for (i = 0; i < list_grpoffsets_cnt; i++) + rd_kafka_ListConsumerGroupOffsets_destroy(list_grpoffsets[i]); +} + +/** + * @brief Allocate a new ListGroup and make a copy of \p src + */ +static rd_kafka_ListConsumerGroupOffsets_t * 
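For illustration only (not part of the patch): a hedged usage sketch that satisfies the validation enforced above for the new AlterConsumerGroupOffsets API — exactly one request object, a non-empty partition list, offsets >= 0, and no duplicate partitions. The topic and group names are placeholders; `rk` and `queue` are assumed to exist in the caller.

```c
#include <librdkafka/rdkafka.h>

/* Illustrative sketch: build and submit a single-group offset alteration. */
static void alter_group_offsets_example(rd_kafka_t *rk,
                                        rd_kafka_queue_t *queue) {
        rd_kafka_topic_partition_list_t *offsets =
            rd_kafka_topic_partition_list_new(1);
        rd_kafka_AlterConsumerGroupOffsets_t *req;

        /* One partition with a non-negative target offset. */
        rd_kafka_topic_partition_list_add(offsets, "my_topic", 0)->offset = 42;

        req = rd_kafka_AlterConsumerGroupOffsets_new("my_group", offsets);

        /* Exactly one request object may be passed per call; the result is
         * delivered on `queue` as an ALTERCONSUMERGROUPOFFSETS_RESULT event. */
        rd_kafka_AlterConsumerGroupOffsets(rk, &req, 1, NULL /*options*/,
                                           queue);

        /* The API copies its inputs, so they can be freed right away. */
        rd_kafka_AlterConsumerGroupOffsets_destroy(req);
        rd_kafka_topic_partition_list_destroy(offsets);
}
```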
+rd_kafka_ListConsumerGroupOffsets_copy( + const rd_kafka_ListConsumerGroupOffsets_t *src) { + return rd_kafka_ListConsumerGroupOffsets_new(src->group_id, + src->partitions); +} + +/** + * @brief Send a OffsetFetchRequest to \p rkb with the partitions + * in list_grpoffsets (ListConsumerGroupOffsets_t*) using + * \p options. + * + */ +static rd_kafka_resp_err_t rd_kafka_ListConsumerGroupOffsetsRequest( + rd_kafka_broker_t *rkb, + /* (rd_kafka_ListConsumerGroupOffsets_t*) */ + const rd_list_t *list_grpoffsets, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + int op_timeout; + rd_bool_t require_stable_offsets; + const rd_kafka_ListConsumerGroupOffsets_t *grpoffsets = + rd_list_elem(list_grpoffsets, 0); + + rd_assert(rd_list_cnt(list_grpoffsets) == 1); + + op_timeout = rd_kafka_confval_get_int(&options->request_timeout); + require_stable_offsets = + rd_kafka_confval_get_int(&options->require_stable_offsets); + rd_kafka_OffsetFetchRequest( + rkb, grpoffsets->group_id, grpoffsets->partitions, + require_stable_offsets, op_timeout, replyq, resp_cb, opaque); + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse OffsetFetchResponse and create ADMIN_RESULT op. + */ +static rd_kafka_resp_err_t +rd_kafka_ListConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets = + rd_list_elem(&rko_req->rko_u.admin_request.args, 0); + rd_kafka_t *rk; + rd_kafka_broker_t *rkb; + rd_kafka_topic_partition_list_t *offsets = NULL; + rd_kafka_op_t *rko_result; + rd_kafka_resp_err_t err; + + rk = rko_req->rko_rk; + rkb = reply->rkbuf_rkb; + err = rd_kafka_handle_OffsetFetch(rk, rkb, RD_KAFKA_RESP_ERR_NO_ERROR, + reply, NULL, &offsets, rd_false, + rd_true, rd_false); + + if (unlikely(err != RD_KAFKA_RESP_ERR_NO_ERROR)) { + reply->rkbuf_err = err; + goto err; + } + + /* Create result op and group_result_t */ + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_group_result_free); + rd_list_add(&rko_result->rko_u.admin_result.results, + rd_kafka_group_result_new(list_grpoffsets->group_id, -1, + offsets, NULL)); + + if (likely(offsets != NULL)) + rd_kafka_topic_partition_list_destroy(offsets); + + *rko_resultp = rko_result; + + return RD_KAFKA_RESP_ERR_NO_ERROR; +err: + if (likely(offsets != NULL)) + rd_kafka_topic_partition_list_destroy(offsets); + + rd_snprintf(errstr, errstr_size, + "ListConsumerGroupOffsetsResponse response failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_ListConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_ListConsumerGroupOffsetsRequest, + rd_kafka_ListConsumerGroupOffsetsResponse_parse, + }; + rd_kafka_op_t *rko; + rd_kafka_topic_partition_list_t *copied_offsets; + + rd_assert(rkqu); + + rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS, + RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, &cbs, options, + rkqu->rkqu_q); + + if (list_grpoffsets_cnt != 1) { + /* For simplicity we only support one single group for now */ + rd_kafka_admin_result_fail(rko, 
RD_KAFKA_RESP_ERR__INVALID_ARG, + "Exactly one " + "ListConsumerGroupOffsets must " + "be passed"); + goto fail; + } + + if (list_grpoffsets[0]->partitions != NULL && + list_grpoffsets[0]->partitions->cnt == 0) { + /* Either pass NULL for all the partitions or a non-empty list + */ + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "NULL or " + "non-empty topic partition list must " + "be passed"); + goto fail; + } + + /* TODO: add group id duplication check when implementing KIP-709 */ + if (list_grpoffsets[0]->partitions != NULL) { + /* Copy offsets list for checking duplicated */ + copied_offsets = rd_kafka_topic_partition_list_copy( + list_grpoffsets[0]->partitions); + if (rd_kafka_topic_partition_list_has_duplicates( + copied_offsets, rd_false /*check partition*/)) { + rd_kafka_topic_partition_list_destroy(copied_offsets); + rd_kafka_admin_result_fail( + rko, RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate partitions not allowed"); + goto fail; + } + rd_kafka_topic_partition_list_destroy(copied_offsets); + } + + rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR; + rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; + rko->rko_u.admin_request.coordkey = + rd_strdup(list_grpoffsets[0]->group_id); + + /* Store copy of group on request so the group name can be reached + * from the response parser. */ + rd_list_init(&rko->rko_u.admin_request.args, 1, + rd_kafka_ListConsumerGroupOffsets_free); + rd_list_add(&rko->rko_u.admin_request.args, + rd_kafka_ListConsumerGroupOffsets_copy(list_grpoffsets[0])); + + rd_kafka_q_enq(rk->rk_ops, rko); + return; +fail: + rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/); +} + + +/** + * @brief Get an array of group results from a ListConsumerGroups result. + * + * The returned \p groups life-time is the same as the \p result object. + * @param cntp is updated to the number of elements in the array. + */ +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups( + const rd_kafka_ListConsumerGroupOffsets_result_t *result, + size_t *cntp) { + return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result, + cntp); +} + +/**@}*/ + +/** + * @name List consumer groups + * @{ + * + * + * + * + */ + +#define CONSUMER_PROTOCOL_TYPE "consumer" + +/** + * @brief Create a new ConsumerGroupListing object. + * + * @param group_id The group id. + * @param is_simple_consumer_group Is the group simple? + * @param state Group state. + */ +static rd_kafka_ConsumerGroupListing_t * +rd_kafka_ConsumerGroupListing_new(const char *group_id, + rd_bool_t is_simple_consumer_group, + rd_kafka_consumer_group_state_t state) { + rd_kafka_ConsumerGroupListing_t *grplist; + grplist = rd_calloc(1, sizeof(*grplist)); + grplist->group_id = rd_strdup(group_id); + grplist->is_simple_consumer_group = is_simple_consumer_group; + grplist->state = state; + return grplist; +} + +/** + * @brief Copy \p grplist ConsumerGroupListing. + * + * @param grplist The group listing to copy. + * @return A new allocated copy of the passed ConsumerGroupListing. + */ +static rd_kafka_ConsumerGroupListing_t *rd_kafka_ConsumerGroupListing_copy( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return rd_kafka_ConsumerGroupListing_new( + grplist->group_id, grplist->is_simple_consumer_group, + grplist->state); +} + +/** + * @brief Same as rd_kafka_ConsumerGroupListing_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. 
+ */ +static void *rd_kafka_ConsumerGroupListing_copy_opaque(const void *grplist, + void *opaque) { + return rd_kafka_ConsumerGroupListing_copy(grplist); +} + +static void rd_kafka_ConsumerGroupListing_destroy( + rd_kafka_ConsumerGroupListing_t *grplist) { + RD_IF_FREE(grplist->group_id, rd_free); + rd_free(grplist); +} + +static void rd_kafka_ConsumerGroupListing_free(void *ptr) { + rd_kafka_ConsumerGroupListing_destroy(ptr); +} + +const char *rd_kafka_ConsumerGroupListing_group_id( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return grplist->group_id; +} + +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return grplist->is_simple_consumer_group; +} + +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state( + const rd_kafka_ConsumerGroupListing_t *grplist) { + return grplist->state; +} + +/** + * @brief Create a new ListConsumerGroupsResult object. + * + * @param valid + * @param errors + */ +static rd_kafka_ListConsumerGroupsResult_t * +rd_kafka_ListConsumerGroupsResult_new(const rd_list_t *valid, + const rd_list_t *errors) { + rd_kafka_ListConsumerGroupsResult_t *res; + res = rd_calloc(1, sizeof(*res)); + rd_list_init_copy(&res->valid, valid); + rd_list_copy_to(&res->valid, valid, + rd_kafka_ConsumerGroupListing_copy_opaque, NULL); + rd_list_init_copy(&res->errors, errors); + rd_list_copy_to(&res->errors, errors, rd_kafka_error_copy_opaque, NULL); + return res; +} + +static void rd_kafka_ListConsumerGroupsResult_destroy( + rd_kafka_ListConsumerGroupsResult_t *res) { + rd_list_destroy(&res->valid); + rd_list_destroy(&res->errors); + rd_free(res); +} + +static void rd_kafka_ListConsumerGroupsResult_free(void *ptr) { + rd_kafka_ListConsumerGroupsResult_destroy(ptr); +} + +/** + * @brief Copy the passed ListConsumerGroupsResult. + * + * @param res the ListConsumerGroupsResult to copy + * @return a newly allocated ListConsumerGroupsResult object. + * + * @sa Release the object with rd_kafka_ListConsumerGroupsResult_destroy(). + */ +static rd_kafka_ListConsumerGroupsResult_t * +rd_kafka_ListConsumerGroupsResult_copy( + const rd_kafka_ListConsumerGroupsResult_t *res) { + return rd_kafka_ListConsumerGroupsResult_new(&res->valid, &res->errors); +} + +/** + * @brief Same as rd_kafka_ListConsumerGroupsResult_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. + */ +static void *rd_kafka_ListConsumerGroupsResult_copy_opaque(const void *list, + void *opaque) { + return rd_kafka_ListConsumerGroupsResult_copy(list); +} + +/** + * @brief Send ListConsumerGroupsRequest. Admin worker compatible callback. 
+ */ +static rd_kafka_resp_err_t +rd_kafka_admin_ListConsumerGroupsRequest(rd_kafka_broker_t *rkb, + const rd_list_t *groups /*(char*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + int i; + rd_kafka_resp_err_t err; + rd_kafka_error_t *error; + const char **states_str = NULL; + int states_str_cnt = 0; + rd_list_t *states = + rd_kafka_confval_get_ptr(&options->match_consumer_group_states); + + /* Prepare list_options */ + if (states && rd_list_cnt(states) > 0) { + states_str_cnt = rd_list_cnt(states); + states_str = rd_calloc(states_str_cnt, sizeof(*states_str)); + for (i = 0; i < states_str_cnt; i++) { + states_str[i] = rd_kafka_consumer_group_state_name( + rd_list_get_int32(states, i)); + } + } + + error = rd_kafka_ListGroupsRequest(rkb, -1, states_str, states_str_cnt, + replyq, resp_cb, opaque); + + if (states_str) { + rd_free(states_str); + } + + if (error) { + rd_snprintf(errstr, errstr_size, "%s", + rd_kafka_error_string(error)); + err = rd_kafka_error_code(error); + rd_kafka_error_destroy(error); + return err; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse ListConsumerGroupsResponse and create ADMIN_RESULT op. + */ +static rd_kafka_resp_err_t +rd_kafka_ListConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + int i, cnt; + int16_t error_code, api_version; + rd_kafka_op_t *rko_result = NULL; + rd_kafka_error_t *error = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_list_t valid, errors; + rd_kafka_ListConsumerGroupsResult_t *list_result; + char *group_id = NULL, *group_state = NULL, *proto_type = NULL; + + api_version = rd_kafka_buf_ApiVersion(reply); + if (api_version >= 1) { + rd_kafka_buf_read_throttle_time(reply); + } + rd_kafka_buf_read_i16(reply, &error_code); + if (error_code) { + error = rd_kafka_error_new(error_code, + "Broker [%d" + "] " + "ListConsumerGroups: %s", + rd_kafka_broker_id(rkb), + rd_kafka_err2str(error_code)); + } + + rd_kafka_buf_read_arraycnt(reply, &cnt, RD_KAFKAP_GROUPS_MAX); + rd_list_init(&valid, cnt, rd_kafka_ConsumerGroupListing_free); + rd_list_init(&errors, 8, rd_free); + if (error) + rd_list_add(&errors, error); + + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, 1, + rd_kafka_ListConsumerGroupsResult_free); + + for (i = 0; i < cnt; i++) { + rd_kafkap_str_t GroupId, ProtocolType, + GroupState = RD_ZERO_INIT; + rd_kafka_ConsumerGroupListing_t *group_listing; + rd_bool_t is_simple_consumer_group, is_consumer_protocol_type; + rd_kafka_consumer_group_state_t state = + RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN; + + rd_kafka_buf_read_str(reply, &GroupId); + rd_kafka_buf_read_str(reply, &ProtocolType); + if (api_version >= 4) { + rd_kafka_buf_read_str(reply, &GroupState); + } + rd_kafka_buf_skip_tags(reply); + + group_id = RD_KAFKAP_STR_DUP(&GroupId); + proto_type = RD_KAFKAP_STR_DUP(&ProtocolType); + if (api_version >= 4) { + group_state = RD_KAFKAP_STR_DUP(&GroupState); + state = rd_kafka_consumer_group_state_code(group_state); + } + + is_simple_consumer_group = *proto_type == '\0'; + is_consumer_protocol_type = + !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE); + if (is_simple_consumer_group || is_consumer_protocol_type) { + group_listing = rd_kafka_ConsumerGroupListing_new( + group_id, is_simple_consumer_group, state); + 
rd_list_add(&valid, group_listing); + } + + rd_free(group_id); + rd_free(group_state); + rd_free(proto_type); + group_id = NULL; + group_state = NULL; + proto_type = NULL; + } + rd_kafka_buf_skip_tags(reply); + +err_parse: + if (group_id) + rd_free(group_id); + if (group_state) + rd_free(group_state); + if (proto_type) + rd_free(proto_type); + + if (reply->rkbuf_err) { + error_code = reply->rkbuf_err; + error = rd_kafka_error_new( + error_code, + "Broker [%d" + "] " + "ListConsumerGroups response protocol parse failure: %s", + rd_kafka_broker_id(rkb), rd_kafka_err2str(error_code)); + rd_list_add(&errors, error); + } + + list_result = rd_kafka_ListConsumerGroupsResult_new(&valid, &errors); + rd_list_add(&rko_result->rko_u.admin_result.results, list_result); + + *rko_resultp = rko_result; + rd_list_destroy(&valid); + rd_list_destroy(&errors); + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** @brief Merge the ListConsumerGroups response from a single broker + * into the user response list. + */ +static void +rd_kafka_ListConsumerGroups_response_merge(rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { + int cnt; + rd_kafka_ListConsumerGroupsResult_t *res = NULL; + rd_kafka_ListConsumerGroupsResult_t *newres; + rd_list_t new_valid, new_errors; + + rd_assert(rko_partial->rko_evtype == + RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT); + + cnt = rd_list_cnt(&rko_fanout->rko_u.admin_request.fanout.results); + if (cnt) { + res = rd_list_elem( + &rko_fanout->rko_u.admin_request.fanout.results, 0); + } else { + rd_list_init(&new_valid, 0, rd_kafka_ConsumerGroupListing_free); + rd_list_init(&new_errors, 0, rd_free); + res = rd_kafka_ListConsumerGroupsResult_new(&new_valid, + &new_errors); + rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, 0, + res); + rd_list_destroy(&new_valid); + rd_list_destroy(&new_errors); + } + if (!rko_partial->rko_err) { + int new_valid_count, new_errors_count; + const rd_list_t *new_valid_list, *new_errors_list; + /* Read the partial result and merge the valid groups + * and the errors into the fanout parent result. */ + newres = + rd_list_elem(&rko_partial->rko_u.admin_result.results, 0); + rd_assert(newres); + new_valid_count = rd_list_cnt(&newres->valid); + new_errors_count = rd_list_cnt(&newres->errors); + if (new_valid_count) { + new_valid_list = &newres->valid; + rd_list_grow(&res->valid, new_valid_count); + rd_list_copy_to( + &res->valid, new_valid_list, + rd_kafka_ConsumerGroupListing_copy_opaque, NULL); + } + if (new_errors_count) { + new_errors_list = &newres->errors; + rd_list_grow(&res->errors, new_errors_count); + rd_list_copy_to(&res->errors, new_errors_list, + rd_kafka_error_copy_opaque, NULL); + } + } else { + /* Op errored, e.g. 
timeout */ + rd_list_add(&res->errors, + rd_kafka_error_new(rko_partial->rko_err, NULL)); + } +} + +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko; + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_admin_ListConsumerGroupsRequest, + rd_kafka_ListConsumerGroupsResponse_parse}; + static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { + rd_kafka_ListConsumerGroups_response_merge, + rd_kafka_ListConsumerGroupsResult_copy_opaque, + }; + + rko = rd_kafka_admin_request_op_target_all_new( + rk, RD_KAFKA_OP_LISTCONSUMERGROUPS, + RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, &cbs, &fanout_cbs, + rd_kafka_ListConsumerGroupsResult_free, options, rkqu->rkqu_q); + rd_kafka_q_enq(rk->rk_ops, rko); +} + +const rd_kafka_ConsumerGroupListing_t ** +rd_kafka_ListConsumerGroups_result_valid( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp) { + int list_result_cnt; + const rd_kafka_ListConsumerGroupsResult_t *list_result; + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS); + + list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results); + rd_assert(list_result_cnt == 1); + list_result = rd_list_elem(&rko->rko_u.admin_result.results, 0); + *cntp = rd_list_cnt(&list_result->valid); + + return (const rd_kafka_ConsumerGroupListing_t **) + list_result->valid.rl_elems; +} + +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp) { + int list_result_cnt, error_cnt; + const rd_kafka_ListConsumerGroupsResult_t *list_result; + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS); + + list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results); + rd_assert(list_result_cnt == 1); + list_result = rko->rko_u.admin_result.results.rl_elems[0]; + error_cnt = rd_list_cnt(&list_result->errors); + if (error_cnt == 0) { + *cntp = 0; + return NULL; + } + *cntp = error_cnt; + return (const rd_kafka_error_t **)list_result->errors.rl_elems; +} + +/**@}*/ + +/** + * @name Describe consumer groups + * @{ + * + * + * + * + */ + +/** + * @brief Create a new MemberDescription object. This object is used for + * creating a ConsumerGroupDescription. + * + * @param client_id The client id. + * @param consumer_id The consumer id (or member id). + * @param group_instance_id (optional) The group instance id + * for static membership. + * @param host The consumer host. + * @param assignment The member's assigned partitions, or NULL if none. + * + * @return A new allocated MemberDescription object. + * Use rd_kafka_MemberDescription_destroy() to free when done. 
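For illustration only (not part of the patch): a hedged sketch of listing consumer groups filtered by state via the new `rd_kafka_AdminOptions_set_match_consumer_group_states()` option, reading the result with the accessors defined above. The `rd_kafka_event_ListConsumerGroups_result()` event accessor is assumed to be declared alongside the new event type; error handling is omitted for brevity.

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Illustrative sketch: list stable consumer groups and print their ids. */
static void list_groups_example(rd_kafka_t *rk) {
        rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
        rd_kafka_consumer_group_state_t states[] = {
            RD_KAFKA_CONSUMER_GROUP_STATE_STABLE};
        rd_kafka_AdminOptions_t *options =
            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
        rd_kafka_event_t *event;

        /* Only return groups that are currently in the Stable state. */
        rd_kafka_AdminOptions_set_match_consumer_group_states(options, states,
                                                              1);
        rd_kafka_ListConsumerGroups(rk, options, queue);

        event = rd_kafka_queue_poll(queue, 10 * 1000 /*10s*/);
        if (event && rd_kafka_event_type(event) ==
                         RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) {
                size_t i, cnt;
                const rd_kafka_ListConsumerGroups_result_t *result =
                    rd_kafka_event_ListConsumerGroups_result(event);
                const rd_kafka_ConsumerGroupListing_t **groups =
                    rd_kafka_ListConsumerGroups_result_valid(result, &cnt);

                for (i = 0; i < cnt; i++)
                        printf("group: %s\n",
                               rd_kafka_ConsumerGroupListing_group_id(
                                   groups[i]));
        }

        if (event)
                rd_kafka_event_destroy(event);
        rd_kafka_AdminOptions_destroy(options);
        rd_kafka_queue_destroy(queue);
}
```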
+ */ +static rd_kafka_MemberDescription_t *rd_kafka_MemberDescription_new( + const char *client_id, + const char *consumer_id, + const char *group_instance_id, + const char *host, + const rd_kafka_topic_partition_list_t *assignment) { + rd_kafka_MemberDescription_t *member; + member = rd_calloc(1, sizeof(*member)); + member->client_id = rd_strdup(client_id); + member->consumer_id = rd_strdup(consumer_id); + if (group_instance_id) + member->group_instance_id = rd_strdup(group_instance_id); + member->host = rd_strdup(host); + if (assignment) + member->assignment.partitions = + rd_kafka_topic_partition_list_copy(assignment); + else + member->assignment.partitions = + rd_kafka_topic_partition_list_new(0); + return member; +} + +/** + * @brief Allocate a new MemberDescription, copy of \p src + * and return it. + * + * @param src The MemberDescription to copy. + * @return A new allocated MemberDescription object, + * Use rd_kafka_MemberDescription_destroy() to free when done. + */ +static rd_kafka_MemberDescription_t * +rd_kafka_MemberDescription_copy(const rd_kafka_MemberDescription_t *src) { + return rd_kafka_MemberDescription_new(src->client_id, src->consumer_id, + src->group_instance_id, src->host, + src->assignment.partitions); +} + +/** + * @brief MemberDescription copy, compatible with rd_list_copy_to. + * + * @param elem The MemberDescription to copy- + * @param opaque Not used. + */ +static void *rd_kafka_MemberDescription_list_copy(const void *elem, + void *opaque) { + return rd_kafka_MemberDescription_copy(elem); +} + +static void +rd_kafka_MemberDescription_destroy(rd_kafka_MemberDescription_t *member) { + rd_free(member->client_id); + rd_free(member->consumer_id); + rd_free(member->host); + if (member->group_instance_id != NULL) + rd_free(member->group_instance_id); + if (member->assignment.partitions) + rd_kafka_topic_partition_list_destroy( + member->assignment.partitions); + rd_free(member); +} + +static void rd_kafka_MemberDescription_free(void *member) { + rd_kafka_MemberDescription_destroy(member); +} + +const char *rd_kafka_MemberDescription_client_id( + const rd_kafka_MemberDescription_t *member) { + return member->client_id; +} + +const char *rd_kafka_MemberDescription_group_instance_id( + const rd_kafka_MemberDescription_t *member) { + return member->group_instance_id; +} + +const char *rd_kafka_MemberDescription_consumer_id( + const rd_kafka_MemberDescription_t *member) { + return member->consumer_id; +} + +const char * +rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member) { + return member->host; +} + +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment( + const rd_kafka_MemberDescription_t *member) { + return &member->assignment; +} + +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions( + const rd_kafka_MemberAssignment_t *assignment) { + return assignment->partitions; +} + + +/** + * @brief Create a new ConsumerGroupDescription object. + * + * @param group_id The group id. + * @param is_simple_consumer_group Is the group simple? + * @param members List of members (rd_kafka_MemberDescription_t) of this + * group. + * @param partition_assignor (optional) Chosen assignor. + * @param state Group state. + * @param coordinator (optional) Group coordinator. + * @param error (optional) Error received for this group. + * @return A new allocated ConsumerGroupDescription object. + * Use rd_kafka_ConsumerGroupDescription_destroy() to free when done. 
+ */ +static rd_kafka_ConsumerGroupDescription_t * +rd_kafka_ConsumerGroupDescription_new(const char *group_id, + rd_bool_t is_simple_consumer_group, + const rd_list_t *members, + const char *partition_assignor, + rd_kafka_consumer_group_state_t state, + const rd_kafka_Node_t *coordinator, + rd_kafka_error_t *error) { + rd_kafka_ConsumerGroupDescription_t *grpdesc; + grpdesc = rd_calloc(1, sizeof(*grpdesc)); + grpdesc->group_id = rd_strdup(group_id); + grpdesc->is_simple_consumer_group = is_simple_consumer_group; + if (members == NULL) { + rd_list_init(&grpdesc->members, 0, + rd_kafka_MemberDescription_free); + } else { + rd_list_init_copy(&grpdesc->members, members); + rd_list_copy_to(&grpdesc->members, members, + rd_kafka_MemberDescription_list_copy, NULL); + } + grpdesc->partition_assignor = !partition_assignor + ? (char *)partition_assignor + : rd_strdup(partition_assignor); + grpdesc->state = state; + if (coordinator != NULL) + grpdesc->coordinator = rd_kafka_Node_copy(coordinator); + grpdesc->error = + error != NULL ? rd_kafka_error_new(rd_kafka_error_code(error), "%s", + rd_kafka_error_string(error)) + : NULL; + return grpdesc; +} + +/** + * @brief New instance of ConsumerGroupDescription from an error. + * + * @param group_id The group id. + * @param error The error. + * @return A new allocated ConsumerGroupDescription with the passed error. + */ +static rd_kafka_ConsumerGroupDescription_t * +rd_kafka_ConsumerGroupDescription_new_error(const char *group_id, + rd_kafka_error_t *error) { + return rd_kafka_ConsumerGroupDescription_new( + group_id, rd_false, NULL, NULL, + RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN, NULL, error); +} + +/** + * @brief Copy \p desc ConsumerGroupDescription. + * + * @param desc The group description to copy. + * @return A new allocated copy of the passed ConsumerGroupDescription. + */ +static rd_kafka_ConsumerGroupDescription_t * +rd_kafka_ConsumerGroupDescription_copy( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return rd_kafka_ConsumerGroupDescription_new( + grpdesc->group_id, grpdesc->is_simple_consumer_group, + &grpdesc->members, grpdesc->partition_assignor, grpdesc->state, + grpdesc->coordinator, grpdesc->error); +} + +/** + * @brief Same as rd_kafka_ConsumerGroupDescription_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. 
+ */ +static void *rd_kafka_ConsumerGroupDescription_copy_opaque(const void *grpdesc, + void *opaque) { + return rd_kafka_ConsumerGroupDescription_copy(grpdesc); +} + +static void rd_kafka_ConsumerGroupDescription_destroy( + rd_kafka_ConsumerGroupDescription_t *grpdesc) { + if (likely(grpdesc->group_id != NULL)) + rd_free(grpdesc->group_id); + rd_list_destroy(&grpdesc->members); + if (likely(grpdesc->partition_assignor != NULL)) + rd_free(grpdesc->partition_assignor); + if (likely(grpdesc->error != NULL)) + rd_kafka_error_destroy(grpdesc->error); + if (grpdesc->coordinator) + rd_kafka_Node_destroy(grpdesc->coordinator); + rd_free(grpdesc); +} + +static void rd_kafka_ConsumerGroupDescription_free(void *ptr) { + rd_kafka_ConsumerGroupDescription_destroy(ptr); +} + +const char *rd_kafka_ConsumerGroupDescription_group_id( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->group_id; +} + +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->error; +} + + +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->is_simple_consumer_group; +} + + +const char *rd_kafka_ConsumerGroupDescription_partition_assignor( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->partition_assignor; +} + + +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->state; +} + +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return grpdesc->coordinator; +} + +size_t rd_kafka_ConsumerGroupDescription_member_count( + const rd_kafka_ConsumerGroupDescription_t *grpdesc) { + return rd_list_cnt(&grpdesc->members); +} + +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member( + const rd_kafka_ConsumerGroupDescription_t *grpdesc, + size_t idx) { + return (rd_kafka_MemberDescription_t *)rd_list_elem(&grpdesc->members, + idx); +} + +/** + * @brief Group arguments comparator for DescribeConsumerGroups args + */ +static int rd_kafka_DescribeConsumerGroups_cmp(const void *a, const void *b) { + return strcmp(a, b); +} + +/** @brief Merge the DescribeConsumerGroups response from a single broker + * into the user response list. + */ +static void rd_kafka_DescribeConsumerGroups_response_merge( + rd_kafka_op_t *rko_fanout, + const rd_kafka_op_t *rko_partial) { + rd_kafka_ConsumerGroupDescription_t *groupres = NULL; + rd_kafka_ConsumerGroupDescription_t *newgroupres; + const char *grp = rko_partial->rko_u.admin_result.opaque; + int orig_pos; + + rd_assert(rko_partial->rko_evtype == + RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT); + + if (!rko_partial->rko_err) { + /* Proper results. + * We only send one group per request, make sure it matches */ + groupres = + rd_list_elem(&rko_partial->rko_u.admin_result.results, 0); + rd_assert(groupres); + rd_assert(!strcmp(groupres->group_id, grp)); + newgroupres = rd_kafka_ConsumerGroupDescription_copy(groupres); + } else { + /* Op errored, e.g. timeout */ + rd_kafka_error_t *error = + rd_kafka_error_new(rko_partial->rko_err, NULL); + newgroupres = + rd_kafka_ConsumerGroupDescription_new_error(grp, error); + rd_kafka_error_destroy(error); + } + + /* As a convenience to the application we insert group result + * in the same order as they were requested. 
*/ + orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp, + rd_kafka_DescribeConsumerGroups_cmp); + rd_assert(orig_pos != -1); + + /* Make sure result is not already set */ + rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, + orig_pos) == NULL); + + rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos, + newgroupres); +} + + +/** + * @brief Construct and send DescribeConsumerGroupsRequest to \p rkb + * with the groups (char *) in \p groups, using + * \p options. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for + * transmission, otherwise an error code and errstr will be + * updated with a human readable error string. + */ +static rd_kafka_resp_err_t rd_kafka_admin_DescribeConsumerGroupsRequest( + rd_kafka_broker_t *rkb, + const rd_list_t *groups /*(char*)*/, + rd_kafka_AdminOptions_t *options, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + int i; + char *group; + rd_kafka_resp_err_t err; + int groups_cnt = rd_list_cnt(groups); + rd_kafka_error_t *error = NULL; + char **groups_arr = rd_calloc(groups_cnt, sizeof(*groups_arr)); + + RD_LIST_FOREACH(group, groups, i) { + groups_arr[i] = rd_list_elem(groups, i); + } + error = rd_kafka_DescribeGroupsRequest(rkb, -1, groups_arr, groups_cnt, + replyq, resp_cb, opaque); + rd_free(groups_arr); + + if (error) { + rd_snprintf(errstr, errstr_size, "%s", + rd_kafka_error_string(error)); + err = rd_kafka_error_code(error); + rd_kafka_error_destroy(error); + return err; + } + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Parse DescribeConsumerGroupsResponse and create ADMIN_RESULT op. 
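+ *
+ * For reference, the reply body is consumed in the following order by the
+ * code below (a summary of the reads, not an authoritative protocol spec):
+ *
+ *   ThrottleTimeMs                          (v1+)
+ *   Groups array, per group:
+ *     ErrorCode
+ *     GroupId, GroupState, ProtocolType, ProtocolData
+ *     Members array, per member:
+ *       MemberId
+ *       GroupInstanceId                     (v4+)
+ *       ClientId, ClientHost
+ *       MemberMetadata, MemberAssignment    (bytes)
+ *     AuthorizedOperations                  (v3+, currently ignored)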
+ */ +static rd_kafka_resp_err_t +rd_kafka_DescribeConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req, + rd_kafka_op_t **rko_resultp, + rd_kafka_buf_t *reply, + char *errstr, + size_t errstr_size) { + const int log_decode_errors = LOG_ERR; + int nodeid; + uint16_t port; + int16_t api_version; + int32_t cnt; + rd_kafka_op_t *rko_result = NULL; + rd_kafka_broker_t *rkb = reply->rkbuf_rkb; + rd_kafka_Node_t *node = NULL; + rd_kafka_error_t *error = NULL; + char *group_id = NULL, *group_state = NULL, *proto_type = NULL, + *proto = NULL, *host = NULL; + + api_version = rd_kafka_buf_ApiVersion(reply); + if (api_version >= 1) { + rd_kafka_buf_read_throttle_time(reply); + } + + rd_kafka_buf_read_arraycnt(reply, &cnt, 100000); + + rko_result = rd_kafka_admin_result_new(rko_req); + rd_list_init(&rko_result->rko_u.admin_result.results, cnt, + rd_kafka_ConsumerGroupDescription_free); + + rd_kafka_broker_lock(rkb); + nodeid = rkb->rkb_nodeid; + host = rd_strdup(rkb->rkb_origname); + port = rkb->rkb_port; + rd_kafka_broker_unlock(rkb); + + node = rd_kafka_Node_new(nodeid, host, port, NULL); + while (cnt-- > 0) { + int16_t error_code; + rd_kafkap_str_t GroupId, GroupState, ProtocolType, ProtocolData; + rd_bool_t is_simple_consumer_group, is_consumer_protocol_type; + int32_t member_cnt; + rd_list_t members; + rd_kafka_ConsumerGroupDescription_t *grpdesc = NULL; + + rd_kafka_buf_read_i16(reply, &error_code); + rd_kafka_buf_read_str(reply, &GroupId); + rd_kafka_buf_read_str(reply, &GroupState); + rd_kafka_buf_read_str(reply, &ProtocolType); + rd_kafka_buf_read_str(reply, &ProtocolData); + rd_kafka_buf_read_arraycnt(reply, &member_cnt, 100000); + + group_id = RD_KAFKAP_STR_DUP(&GroupId); + group_state = RD_KAFKAP_STR_DUP(&GroupState); + proto_type = RD_KAFKAP_STR_DUP(&ProtocolType); + proto = RD_KAFKAP_STR_DUP(&ProtocolData); + + if (error_code) { + error = rd_kafka_error_new( + error_code, "DescribeConsumerGroups: %s", + rd_kafka_err2str(error_code)); + } + + is_simple_consumer_group = *proto_type == '\0'; + is_consumer_protocol_type = + !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE); + if (error == NULL && !is_simple_consumer_group && + !is_consumer_protocol_type) { + error = rd_kafka_error_new( + RD_KAFKA_RESP_ERR__INVALID_ARG, + "GroupId %s is not a consumer group (%s).", + group_id, proto_type); + } + + rd_list_init(&members, 0, rd_kafka_MemberDescription_free); + + while (member_cnt-- > 0) { + rd_kafkap_str_t MemberId, ClientId, ClientHost, + GroupInstanceId = RD_KAFKAP_STR_INITIALIZER; + char *member_id, *client_id, *client_host, + *group_instance_id = NULL; + rd_kafkap_bytes_t MemberMetadata, MemberAssignment; + rd_kafka_MemberDescription_t *member; + rd_kafka_topic_partition_list_t *partitions = NULL; + rd_kafka_buf_t *rkbuf; + + rd_kafka_buf_read_str(reply, &MemberId); + if (api_version >= 4) { + rd_kafka_buf_read_str(reply, &GroupInstanceId); + } + rd_kafka_buf_read_str(reply, &ClientId); + rd_kafka_buf_read_str(reply, &ClientHost); + rd_kafka_buf_read_bytes(reply, &MemberMetadata); + rd_kafka_buf_read_bytes(reply, &MemberAssignment); + if (error != NULL) + continue; + + if (RD_KAFKAP_BYTES_LEN(&MemberAssignment) != 0) { + int16_t version; + /* Parse assignment */ + rkbuf = rd_kafka_buf_new_shadow( + MemberAssignment.data, + RD_KAFKAP_BYTES_LEN(&MemberAssignment), + NULL); + /* Protocol parser needs a broker handle + * to log errors on. 
*/ + rkbuf->rkbuf_rkb = rkb; + /* Decreased in rd_kafka_buf_destroy */ + rd_kafka_broker_keep(rkb); + rd_kafka_buf_read_i16(rkbuf, &version); + partitions = rd_kafka_buf_read_topic_partitions( + rkbuf, 0, rd_false, rd_false); + rd_kafka_buf_destroy(rkbuf); + if (!partitions) + rd_kafka_buf_parse_fail( + reply, + "Error reading topic partitions"); + } + + member_id = RD_KAFKAP_STR_DUP(&MemberId); + if (!RD_KAFKAP_STR_IS_NULL(&GroupInstanceId)) { + group_instance_id = + RD_KAFKAP_STR_DUP(&GroupInstanceId); + } + client_id = RD_KAFKAP_STR_DUP(&ClientId); + client_host = RD_KAFKAP_STR_DUP(&ClientHost); + + member = rd_kafka_MemberDescription_new( + client_id, member_id, group_instance_id, + client_host, partitions); + if (partitions) + rd_kafka_topic_partition_list_destroy( + partitions); + rd_list_add(&members, member); + rd_free(member_id); + rd_free(group_instance_id); + rd_free(client_id); + rd_free(client_host); + member_id = NULL; + group_instance_id = NULL; + client_id = NULL; + client_host = NULL; + } + + if (api_version >= 3) { + /* TODO: implement KIP-430 */ + int32_t authorized_operations; + rd_kafka_buf_read_i32(reply, &authorized_operations); + } + + if (error == NULL) { + grpdesc = rd_kafka_ConsumerGroupDescription_new( + group_id, is_simple_consumer_group, &members, proto, + rd_kafka_consumer_group_state_code(group_state), + node, error); + } else { + grpdesc = rd_kafka_ConsumerGroupDescription_new_error( + group_id, error); + } + rd_list_add(&rko_result->rko_u.admin_result.results, grpdesc); + if (error) + rd_kafka_error_destroy(error); + rd_list_destroy(&members); + rd_free(group_id); + rd_free(group_state); + rd_free(proto_type); + rd_free(proto); + error = NULL; + group_id = NULL; + group_state = NULL; + proto_type = NULL; + proto = NULL; + } + + if (host) + rd_free(host); + if (node) + rd_kafka_Node_destroy(node); + *rko_resultp = rko_result; + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (group_id) + rd_free(group_id); + if (group_state) + rd_free(group_state); + if (proto_type) + rd_free(proto_type); + if (proto) + rd_free(proto); + if (error) + rd_kafka_error_destroy(error); + if (host) + rd_free(host); + if (node) + rd_kafka_Node_destroy(node); + if (rko_result) + rd_kafka_op_destroy(rko_result); + + rd_snprintf( + errstr, errstr_size, + "DescribeConsumerGroups response protocol parse failure: %s", + rd_kafka_err2str(reply->rkbuf_err)); + + return reply->rkbuf_err; +} + +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, + const char **groups, + size_t groups_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu) { + rd_kafka_op_t *rko_fanout; + rd_list_t dup_list; + size_t i; + static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = { + rd_kafka_DescribeConsumerGroups_response_merge, + rd_kafka_ConsumerGroupDescription_copy_opaque}; + + rd_assert(rkqu); + + rko_fanout = rd_kafka_admin_fanout_op_new( + rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS, + RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &fanout_cbs, options, + rkqu->rkqu_q); + + if (groups_cnt == 0) { + rd_kafka_admin_result_fail(rko_fanout, + RD_KAFKA_RESP_ERR__INVALID_ARG, + "No groups to describe"); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return; + } + + /* Copy group list and store it on the request op. + * Maintain original ordering. 
*/ + rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)groups_cnt, + rd_free); + for (i = 0; i < groups_cnt; i++) + rd_list_add(&rko_fanout->rko_u.admin_request.args, + rd_strdup(groups[i])); + + /* Check for duplicates. + * Make a temporary copy of the group list and sort it to check for + * duplicates, we don't want the original list sorted since we want + * to maintain ordering. */ + rd_list_init(&dup_list, + rd_list_cnt(&rko_fanout->rko_u.admin_request.args), NULL); + rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args, NULL, + NULL); + rd_list_sort(&dup_list, rd_kafka_DescribeConsumerGroups_cmp); + if (rd_list_find_duplicate(&dup_list, + rd_kafka_DescribeConsumerGroups_cmp)) { + rd_list_destroy(&dup_list); + rd_kafka_admin_result_fail(rko_fanout, + RD_KAFKA_RESP_ERR__INVALID_ARG, + "Duplicate groups not allowed"); + rd_kafka_admin_common_worker_destroy(rk, rko_fanout, + rd_true /*destroy*/); + return; + } + + rd_list_destroy(&dup_list); + + /* Prepare results list where fanned out op's results will be + * accumulated. */ + rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, + (int)groups_cnt, rd_kafka_ConsumerGroupDescription_free); + rko_fanout->rko_u.admin_request.fanout.outstanding = (int)groups_cnt; + + /* Create individual request ops for each group. + * FIXME: A future optimization is to coalesce all groups for a single + * coordinator into one op. */ + for (i = 0; i < groups_cnt; i++) { + static const struct rd_kafka_admin_worker_cbs cbs = { + rd_kafka_admin_DescribeConsumerGroupsRequest, + rd_kafka_DescribeConsumerGroupsResponse_parse, + }; + char *grp = + rd_list_elem(&rko_fanout->rko_u.admin_request.args, (int)i); + rd_kafka_op_t *rko = rd_kafka_admin_request_op_new( + rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS, + RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &cbs, options, + rk->rk_ops); + + rko->rko_u.admin_request.fanout_parent = rko_fanout; + rko->rko_u.admin_request.broker_id = + RD_KAFKA_ADMIN_TARGET_COORDINATOR; + rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP; + rko->rko_u.admin_request.coordkey = rd_strdup(grp); + + /* Set the group name as the opaque so the fanout worker use it + * to fill in errors. + * References rko_fanout's memory, which will always outlive + * the fanned out op. 
*/ + rd_kafka_AdminOptions_set_opaque( + &rko->rko_u.admin_request.options, grp); + + rd_list_init(&rko->rko_u.admin_request.args, 1, rd_free); + rd_list_add(&rko->rko_u.admin_request.args, + rd_strdup(groups[i])); + + rd_kafka_q_enq(rk->rk_ops, rko); + } +} + +const rd_kafka_ConsumerGroupDescription_t ** +rd_kafka_DescribeConsumerGroups_result_groups( + const rd_kafka_DescribeConsumerGroups_result_t *result, + size_t *cntp) { + const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result; + rd_kafka_op_type_t reqtype = + rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK; + rd_assert(reqtype == RD_KAFKA_OP_DESCRIBECONSUMERGROUPS); + + *cntp = rd_list_cnt(&rko->rko_u.admin_result.results); + return (const rd_kafka_ConsumerGroupDescription_t **) + rko->rko_u.admin_result.results.rl_elems; +} + +/**@}*/ diff -Nru librdkafka-1.9.2/src/rdkafka_admin.h librdkafka-2.0.2/src/rdkafka_admin.h --- librdkafka-1.9.2/src/rdkafka_admin.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_admin.h 2023-01-20 09:14:36.000000000 +0000 @@ -31,6 +31,7 @@ #include "rdstring.h" +#include "rdkafka_error.h" #include "rdkafka_confval.h" @@ -84,6 +85,19 @@ * all */ + rd_kafka_confval_t + require_stable_offsets; /**< BOOL: Whether broker should return + * stable offsets (transaction-committed). + * Valid for: + * ListConsumerGroupOffsets + */ + + rd_kafka_confval_t + match_consumer_group_states; /**< PTR: list of consumer group states + * to query for. + * Valid for: ListConsumerGroups. + */ + rd_kafka_confval_t opaque; /**< PTR: Application opaque. * Valid for all. */ }; @@ -342,5 +356,127 @@ }; /**@}*/ + + +/** + * @name AlterConsumerGroupOffsets + * @{ + */ + +/** + * @brief AlterConsumerGroupOffsets result + */ +struct rd_kafka_AlterConsumerGroupOffsets_result_s { + rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */ +}; + +struct rd_kafka_AlterConsumerGroupOffsets_s { + char *group_id; /**< Points to data */ + rd_kafka_topic_partition_list_t *partitions; + char data[1]; /**< The group id is allocated along with + * the struct here. */ +}; + +/**@}*/ + + +/** + * @name ListConsumerGroupOffsets + * @{ + */ + +/** + * @brief ListConsumerGroupOffsets result + */ +struct rd_kafka_ListConsumerGroupOffsets_result_s { + rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */ +}; + +struct rd_kafka_ListConsumerGroupOffsets_s { + char *group_id; /**< Points to data */ + rd_kafka_topic_partition_list_t *partitions; + char data[1]; /**< The group id is allocated along with + * the struct here. */ +}; + +/**@}*/ + +/** + * @name ListConsumerGroups + * @{ + */ + +/** + * @struct ListConsumerGroups result for a single group + */ +struct rd_kafka_ConsumerGroupListing_s { + char *group_id; /**< Group id */ + /** Is it a simple consumer group? That means empty protocol_type. */ + rd_bool_t is_simple_consumer_group; + rd_kafka_consumer_group_state_t state; /**< Consumer group state. */ +}; + + +/** + * @struct ListConsumerGroups results and errors + */ +struct rd_kafka_ListConsumerGroupsResult_s { + rd_list_t valid; /**< List of valid ConsumerGroupListing + (rd_kafka_ConsumerGroupListing_t *) */ + rd_list_t errors; /**< List of errors (rd_kafka_error_t *) */ +}; + +/**@}*/ + +/** + * @name DescribeConsumerGroups + * @{ + */ + +/** + * @struct Assignment of a consumer group member. + * + */ +struct rd_kafka_MemberAssignment_s { + /** Partitions assigned to current member. */ + rd_kafka_topic_partition_list_t *partitions; +}; + +/** + * @struct Description of a consumer group member. 
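+ *
+ * For illustration, how an application typically reaches these fields via
+ * the public accessors (results come back in request order; rkqu is a
+ * queue from rd_kafka_queue_new() and the event accessor name
+ * rd_kafka_event_DescribeConsumerGroups_result() is assumed here):
+ *
+ *   rd_kafka_DescribeConsumerGroups(rk, groups, group_cnt, NULL, rkqu);
+ *   rd_kafka_event_t *ev = rd_kafka_queue_poll(rkqu, -1);
+ *   const rd_kafka_DescribeConsumerGroups_result_t *res =
+ *           rd_kafka_event_DescribeConsumerGroups_result(ev);
+ *   size_t cnt;
+ *   const rd_kafka_ConsumerGroupDescription_t **descs =
+ *           rd_kafka_DescribeConsumerGroups_result_groups(res, &cnt);
+ *   ... for each group i and each member j up to
+ *       rd_kafka_ConsumerGroupDescription_member_count(descs[i]):
+ *       rd_kafka_ConsumerGroupDescription_member(descs[i], j)
+ *        -> rd_kafka_MemberDescription_assignment()
+ *        -> rd_kafka_MemberAssignment_partitions() ...
+ *   rd_kafka_event_destroy(ev);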
+ * + */ +struct rd_kafka_MemberDescription_s { + char *client_id; /**< Client id */ + char *consumer_id; /**< Consumer id */ + char *group_instance_id; /**< Group instance id */ + char *host; /**< Group member host */ + rd_kafka_MemberAssignment_t assignment; /**< Member assignment */ +}; + +/** + * @struct DescribeConsumerGroups result + */ +struct rd_kafka_ConsumerGroupDescription_s { + /** Group id */ + char *group_id; + /** Is it a simple consumer group? That means empty protocol_type. */ + rd_bool_t is_simple_consumer_group; + /** List of members. + * Type (rd_kafka_MemberDescription_t *): members list */ + rd_list_t members; + /** Protocol type */ + char *protocol_type; + /** Partition assignor identifier. */ + char *partition_assignor; + /** Consumer group state. */ + rd_kafka_consumer_group_state_t state; + /** Consumer group coordinator. */ + rd_kafka_Node_t *coordinator; + /** Group specific error. */ + rd_kafka_error_t *error; +}; + +/**@}*/ #endif /* _RDKAFKA_ADMIN_H_ */ diff -Nru librdkafka-1.9.2/src/rdkafka_assignment.c librdkafka-2.0.2/src/rdkafka_assignment.c --- librdkafka-1.9.2/src/rdkafka_assignment.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_assignment.c 2023-01-20 09:14:36.000000000 +0000 @@ -529,9 +529,10 @@ partitions_to_query->cnt); rd_kafka_OffsetFetchRequest( - coord, partitions_to_query, + coord, rk->rk_group_id->str, partitions_to_query, rk->rk_conf.isolation_level == - RD_KAFKA_READ_COMMITTED /*require_stable*/, + RD_KAFKA_READ_COMMITTED /*require_stable_offsets*/, + 0, /* Timeout */ RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_assignment_handle_OffsetFetch, /* Must be freed by handler */ diff -Nru librdkafka-1.9.2/src/rdkafka_aux.c librdkafka-2.0.2/src/rdkafka_aux.c --- librdkafka-1.9.2/src/rdkafka_aux.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_aux.c 2023-01-20 09:14:36.000000000 +0000 @@ -222,3 +222,57 @@ void rd_kafka_acl_result_free(void *ptr) { rd_kafka_acl_result_destroy((rd_kafka_acl_result_t *)ptr); } + + +/** + * @brief Create a new Node object. + * + * @param id The node id. + * @param host The node host. + * @param port The node port. + * @param rack_id (optional) The node rack id. + * @return A new allocated Node object. + * Use rd_kafka_Node_destroy() to free when done. + */ +rd_kafka_Node_t *rd_kafka_Node_new(int id, + const char *host, + uint16_t port, + const char *rack_id) { + rd_kafka_Node_t *ret = rd_calloc(1, sizeof(*ret)); + ret->id = id; + ret->port = port; + ret->host = rd_strdup(host); + if (rack_id != NULL) + ret->rack_id = rd_strdup(rack_id); + return ret; +} + +/** + * @brief Copy \p src Node object + * + * @param src The Node to copy. + * @return A new allocated Node object. + * Use rd_kafka_Node_destroy() to free when done. 
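+ *
+ * For illustration only (a trivial lifecycle sketch using the accessors
+ * added below):
+ *
+ *   rd_kafka_Node_t *node = rd_kafka_Node_new(1, "broker1", 9092, NULL);
+ *   rd_kafka_Node_t *dup  = rd_kafka_Node_copy(node);
+ *   ... rd_kafka_Node_host(dup) is "broker1",
+ *       rd_kafka_Node_port(dup) is 9092 ...
+ *   rd_kafka_Node_destroy(node);
+ *   rd_kafka_Node_destroy(dup);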
+ */ +rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src) { + return rd_kafka_Node_new(src->id, src->host, src->port, src->rack_id); +} + +void rd_kafka_Node_destroy(rd_kafka_Node_t *node) { + rd_free(node->host); + if (node->rack_id) + rd_free(node->rack_id); + rd_free(node); +} + +int rd_kafka_Node_id(const rd_kafka_Node_t *node) { + return node->id; +} + +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node) { + return node->host; +} + +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node) { + return node->port; +} diff -Nru librdkafka-1.9.2/src/rdkafka_aux.h librdkafka-2.0.2/src/rdkafka_aux.h --- librdkafka-1.9.2/src/rdkafka_aux.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_aux.h 2023-01-20 09:14:36.000000000 +0000 @@ -99,4 +99,22 @@ void *rd_kafka_group_result_copy_opaque(const void *src_groupres, void *opaque); /**@}*/ +/** + * @struct Node represents a broker. + * It's the public type. + */ +typedef struct rd_kafka_Node_s { + int id; /*< Node id */ + char *host; /*< Node host */ + uint16_t port; /*< Node port */ + char *rack_id; /*< (optional) Node rack id */ +} rd_kafka_Node_t; + +rd_kafka_Node_t * +rd_kafka_Node_new(int id, const char *host, uint16_t port, const char *rack_id); + +rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src); + +void rd_kafka_Node_destroy(rd_kafka_Node_t *node); + #endif /* _RDKAFKA_AUX_H_ */ diff -Nru librdkafka-1.9.2/src/rdkafka_broker.c librdkafka-2.0.2/src/rdkafka_broker.c --- librdkafka-1.9.2/src/rdkafka_broker.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_broker.c 2023-01-20 09:14:36.000000000 +0000 @@ -64,6 +64,7 @@ #include "rdkafka_interceptor.h" #include "rdkafka_idempotence.h" #include "rdkafka_txnmgr.h" +#include "rdkafka_fetcher.h" #include "rdtime.h" #include "rdcrc32.h" #include "rdrand.h" @@ -370,6 +371,12 @@ if (trigger_monitors) rd_kafka_broker_trigger_monitors(rkb); + /* Call on_broker_state_change interceptors */ + rd_kafka_interceptors_on_broker_state_change( + rkb->rkb_rk, rkb->rkb_nodeid, + rd_kafka_secproto_names[rkb->rkb_proto], rkb->rkb_origname, + rkb->rkb_port, rd_kafka_broker_state_names[rkb->rkb_state]); + rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk); } @@ -980,10 +987,11 @@ if (!rkb->rkb_rsal) { /* Resolve */ - rkb->rkb_rsal = - rd_getaddrinfo(nodename, RD_KAFKA_PORT_STR, AI_ADDRCONFIG, - rkb->rkb_rk->rk_conf.broker_addr_family, - SOCK_STREAM, IPPROTO_TCP, &errstr); + rkb->rkb_rsal = rd_getaddrinfo( + nodename, RD_KAFKA_PORT_STR, AI_ADDRCONFIG, + rkb->rkb_rk->rk_conf.broker_addr_family, SOCK_STREAM, + IPPROTO_TCP, rkb->rkb_rk->rk_conf.resolve_cb, + rkb->rkb_rk->rk_conf.opaque, &errstr); if (!rkb->rkb_rsal) { rd_kafka_broker_fail( @@ -1622,6 +1630,66 @@ /** + * @brief Asynchronously look up current list of broker ids until available. + * Bootstrap and logical brokers are excluded from the list. + * + * To be called repeatedly with an valid eonce until a non-NULL + * list is returned. + * + * @param rk Client instance. + * @param eonce For triggering asynchronously on state change + * in case broker list isn't yet available. + * @return List of int32_t with broker nodeids when ready, NULL when the eonce + * was added to the wait list. 
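+ *
+ * For illustration, the intended call pattern from an admin worker (the
+ * surrounding worker/eonce plumbing is assumed, and the caller is assumed
+ * to own and destroy the returned list):
+ *
+ *   rd_list_t *nodeids = rd_kafka_brokers_get_nodeids_async(rk, eonce);
+ *   if (!nodeids)
+ *           return;   (eonce re-triggers the worker on broker state change)
+ *   ... use the int32 node ids ...
+ *   rd_list_destroy(nodeids);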
+ */ +rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce) { + rd_list_t *nodeids = NULL; + int version, i, broker_cnt; + + do { + rd_kafka_broker_t *rkb; + version = rd_kafka_brokers_get_state_version(rk); + + rd_kafka_rdlock(rk); + broker_cnt = rd_atomic32_get(&rk->rk_broker_cnt); + if (nodeids) { + if (broker_cnt > rd_list_cnt(nodeids)) { + rd_list_destroy(nodeids); + /* Will be recreated just after */ + nodeids = NULL; + } else { + rd_list_set_cnt(nodeids, 0); + } + } + if (!nodeids) { + nodeids = rd_list_new(0, NULL); + rd_list_init_int32(nodeids, broker_cnt); + } + i = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_kafka_broker_lock(rkb); + if (rkb->rkb_nodeid != -1 && + !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) { + rd_list_set_int32(nodeids, i++, + rkb->rkb_nodeid); + } + rd_kafka_broker_unlock(rkb); + } + rd_kafka_rdunlock(rk); + + if (!rd_list_empty(nodeids)) + return nodeids; + } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce)); + + if (nodeids) { + rd_list_destroy(nodeids); + } + return NULL; /* eonce added to wait list */ +} + + +/** * @returns the current controller using cached metadata information, * and only if the broker's state == \p state. * The reference count is increased for the returned broker. @@ -3796,11 +3864,8 @@ now, flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us, /* Batch message count threshold */ rkb->rkb_rk->rk_conf.batch_num_messages, - /* Batch size threshold. - * When compression is enabled the - * threshold is increased by x8. */ - (rktp->rktp_rkt->rkt_conf.compression_codec ? 1 : 8) * - (int64_t)rkb->rkb_rk->rk_conf.batch_size); + /* Batch total size threshold */ + rkb->rkb_rk->rk_conf.batch_size); } rd_kafka_toppar_unlock(rktp); @@ -3960,11 +4025,8 @@ flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us, /* Batch message count threshold */ rkb->rkb_rk->rk_conf.batch_num_messages, - /* Batch size threshold. - * When compression is enabled the - * threshold is increased by x8. */ - (rktp->rktp_rkt->rkt_conf.compression_codec ? 1 : 8) * - (int64_t)rkb->rkb_rk->rk_conf.batch_size); + /* Batch total size threshold */ + rkb->rkb_rk->rk_conf.batch_size); rd_kafka_toppar_unlock(rktp); } @@ -4093,916 +4155,6 @@ /** - * Backoff the next Fetch request (due to error). - */ -static void rd_kafka_broker_fetch_backoff(rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err) { - int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; - rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); - rd_rkb_dbg(rkb, FETCH, "BACKOFF", "Fetch backoff for %dms: %s", - backoff_ms, rd_kafka_err2str(err)); -} - -/** - * @brief Backoff the next Fetch for specific partition - */ -static void rd_kafka_toppar_fetch_backoff(rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - rd_kafka_resp_err_t err) { - int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; - - /* Don't back off on reaching end of partition */ - if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF) - return; - - /* Certain errors that may require manual intervention should have - * a longer backoff time. */ - if (err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) - backoff_ms = RD_MAX(1000, backoff_ms * 10); - - rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); - - rd_rkb_dbg(rkb, FETCH, "BACKOFF", - "%s [%" PRId32 "]: Fetch backoff for %dms%s%s", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - backoff_ms, err ? ": " : "", - err ? 
rd_kafka_err2str(err) : ""); -} - - -/** - * @brief Handle preferred replica in fetch response. - * - * @locks rd_kafka_toppar_lock(rktp) and - * rd_kafka_rdlock(rk) must NOT be held. - * - * @locality broker thread - */ -static void rd_kafka_fetch_preferred_replica_handle(rd_kafka_toppar_t *rktp, - rd_kafka_buf_t *rkbuf, - rd_kafka_broker_t *rkb, - int32_t preferred_id) { - const rd_ts_t one_minute = 60 * 1000 * 1000; - const rd_ts_t five_seconds = 5 * 1000 * 1000; - rd_kafka_broker_t *preferred_rkb; - rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; - rd_ts_t new_intvl = - rd_interval_immediate(&rktp->rktp_new_lease_intvl, one_minute, 0); - - if (new_intvl < 0) { - /* In lieu of KIP-320, the toppar is delegated back to - * the leader in the event of an offset out-of-range - * error (KIP-392 error case #4) because this scenario - * implies the preferred replica is out-of-sync. - * - * If program execution reaches here, the leader has - * relatively quickly instructed the client back to - * a preferred replica, quite possibly the same one - * as before (possibly resulting from stale metadata), - * so we back off the toppar to slow down potential - * back-and-forth. - */ - - if (rd_interval_immediate(&rktp->rktp_new_lease_log_intvl, - one_minute, 0) > 0) - rd_rkb_log(rkb, LOG_NOTICE, "FETCH", - "%.*s [%" PRId32 - "]: preferred replica " - "(%" PRId32 - ") lease changing too quickly " - "(%" PRId64 - "s < 60s): possibly due to " - "unavailable replica or stale cluster " - "state: backing off next fetch", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, preferred_id, - (one_minute - -new_intvl) / (1000 * 1000)); - - rd_kafka_toppar_fetch_backoff(rkb, rktp, - RD_KAFKA_RESP_ERR_NO_ERROR); - } - - rd_kafka_rdlock(rk); - preferred_rkb = rd_kafka_broker_find_by_nodeid(rk, preferred_id); - rd_kafka_rdunlock(rk); - - if (preferred_rkb) { - rd_interval_reset_to_now(&rktp->rktp_lease_intvl, 0); - rd_kafka_toppar_lock(rktp); - rd_kafka_toppar_broker_update(rktp, preferred_id, preferred_rkb, - "preferred replica updated"); - rd_kafka_toppar_unlock(rktp); - rd_kafka_broker_destroy(preferred_rkb); - return; - } - - if (rd_interval_immediate(&rktp->rktp_metadata_intvl, five_seconds, 0) > - 0) { - rd_rkb_log(rkb, LOG_NOTICE, "FETCH", - "%.*s [%" PRId32 "]: preferred replica (%" PRId32 - ") " - "is unknown: refreshing metadata", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, preferred_id); - - rd_kafka_metadata_refresh_brokers( - rktp->rktp_rkt->rkt_rk, NULL, - "preferred replica unavailable"); - } - - rd_kafka_toppar_fetch_backoff(rkb, rktp, - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE); -} - - -/** - * @brief Handle partition-specific Fetch error. 
- */ -static void rd_kafka_fetch_reply_handle_partition_error( - rd_kafka_broker_t *rkb, - rd_kafka_toppar_t *rktp, - const struct rd_kafka_toppar_ver *tver, - rd_kafka_resp_err_t err, - int64_t HighwaterMarkOffset) { - - /* Some errors should be passed to the - * application while some handled by rdkafka */ - switch (err) { - /* Errors handled by rdkafka */ - case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: - case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: - case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR: - case RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH: - /* Request metadata information update*/ - rd_kafka_toppar_leader_unavailable(rktp, "fetch", err); - break; - - case RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE: - /* Occurs when: - * - Msg exists on broker but - * offset > HWM, or: - * - HWM is >= offset, but msg not - * yet available at that offset - * (replica is out of sync). - * - * Handle by retrying FETCH (with backoff). - */ - rd_rkb_dbg(rkb, MSG, "FETCH", - "Topic %s [%" PRId32 "]: Offset %" PRId64 - " not " - "available on broker %" PRId32 " (leader %" PRId32 - "): " - "retrying", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rktp->rktp_offsets.fetch_offset, - rktp->rktp_broker_id, rktp->rktp_leader_id); - break; - - case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: { - int64_t err_offset; - - if (rktp->rktp_broker_id != rktp->rktp_leader_id && - rktp->rktp_offsets.fetch_offset > HighwaterMarkOffset) { - rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "FETCH", - "Topic %s [%" PRId32 "]: Offset %" PRId64 - " out of range (HighwaterMark %" PRId64 - " fetching from " - "broker %" PRId32 " (leader %" PRId32 - "): " - "reverting to leader", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, - rktp->rktp_offsets.fetch_offset, - HighwaterMarkOffset, rktp->rktp_broker_id, - rktp->rktp_leader_id); - - /* Out of range error cannot be taken as definitive - * when fetching from follower. - * Revert back to the leader in lieu of KIP-320. - */ - rd_kafka_toppar_delegate_to_leader(rktp); - break; - } - - /* Application error */ - err_offset = rktp->rktp_offsets.fetch_offset; - rktp->rktp_offsets.fetch_offset = RD_KAFKA_OFFSET_INVALID; - rd_kafka_offset_reset(rktp, rd_kafka_broker_id(rkb), err_offset, - err, - "fetch failed due to requested offset " - "not available on the broker"); - } break; - - case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED: - /* If we're not authorized to access the - * topic mark it as errored to deny - * further Fetch requests. 
*/ - if (rktp->rktp_last_error != err) { - rd_kafka_consumer_err( - rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, - tver->version, NULL, rktp, - rktp->rktp_offsets.fetch_offset, - "Fetch from broker %" PRId32 " failed: %s", - rd_kafka_broker_id(rkb), rd_kafka_err2str(err)); - rktp->rktp_last_error = err; - } - break; - - - /* Application errors */ - case RD_KAFKA_RESP_ERR__PARTITION_EOF: - if (rkb->rkb_rk->rk_conf.enable_partition_eof) - rd_kafka_consumer_err(rktp->rktp_fetchq, - rd_kafka_broker_id(rkb), err, - tver->version, NULL, rktp, - rktp->rktp_offsets.fetch_offset, - "Fetch from broker %" PRId32 - " reached end of " - "partition at offset %" PRId64 - " (HighwaterMark %" PRId64 ")", - rd_kafka_broker_id(rkb), - rktp->rktp_offsets.fetch_offset, - HighwaterMarkOffset); - break; - - case RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE: - default: /* and all other errors */ - rd_dassert(tver->version > 0); - rd_kafka_consumer_err( - rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, - tver->version, NULL, rktp, rktp->rktp_offsets.fetch_offset, - "Fetch from broker %" PRId32 " failed: %s", - rd_kafka_broker_id(rkb), rd_kafka_err2str(err)); - break; - } - - /* Back off the next fetch for this partition */ - rd_kafka_toppar_fetch_backoff(rkb, rktp, err); -} - - - -/** - * Parses and handles a Fetch reply. - * Returns 0 on success or an error code on failure. - */ -static rd_kafka_resp_err_t -rd_kafka_fetch_reply_handle(rd_kafka_broker_t *rkb, - rd_kafka_buf_t *rkbuf, - rd_kafka_buf_t *request) { - int32_t TopicArrayCnt; - int i; - const int log_decode_errors = LOG_ERR; - rd_kafka_topic_t *rkt = NULL; - int16_t ErrorCode = RD_KAFKA_RESP_ERR_NO_ERROR; - - if (rd_kafka_buf_ApiVersion(request) >= 1) { - int32_t Throttle_Time; - rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); - - rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, - Throttle_Time); - } - - if (rd_kafka_buf_ApiVersion(request) >= 7) { - int32_t SessionId; - rd_kafka_buf_read_i16(rkbuf, &ErrorCode); - rd_kafka_buf_read_i32(rkbuf, &SessionId); - } - - rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); - /* Verify that TopicArrayCnt seems to be in line with remaining size */ - rd_kafka_buf_check_len(rkbuf, - TopicArrayCnt * (3 /*topic min size*/ + - 4 /*PartitionArrayCnt*/ + 4 + - 2 + 8 + 4 /*inner header*/)); - - for (i = 0; i < TopicArrayCnt; i++) { - rd_kafkap_str_t topic; - int32_t fetch_version; - int32_t PartitionArrayCnt; - int j; - - rd_kafka_buf_read_str(rkbuf, &topic); - rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt); - - rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic); - - for (j = 0; j < PartitionArrayCnt; j++) { - struct rd_kafka_toppar_ver *tver, tver_skel; - rd_kafka_toppar_t *rktp = NULL; - rd_kafka_aborted_txns_t *aborted_txns = NULL; - rd_slice_t save_slice; - struct { - int32_t Partition; - int16_t ErrorCode; - int64_t HighwaterMarkOffset; - int64_t LastStableOffset; /* v4 */ - int64_t LogStartOffset; /* v5 */ - int32_t MessageSetSize; - int32_t PreferredReadReplica; /* v11 */ - } hdr; - rd_kafka_resp_err_t err; - int64_t end_offset; - - rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); - rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); - if (ErrorCode) - hdr.ErrorCode = ErrorCode; - rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset); - - end_offset = hdr.HighwaterMarkOffset; - - hdr.LastStableOffset = RD_KAFKA_OFFSET_INVALID; - hdr.LogStartOffset = RD_KAFKA_OFFSET_INVALID; - if (rd_kafka_buf_ApiVersion(request) >= 4) { - int32_t AbortedTxnCnt; - rd_kafka_buf_read_i64(rkbuf, - &hdr.LastStableOffset); - if 
(rd_kafka_buf_ApiVersion(request) >= 5) - rd_kafka_buf_read_i64( - rkbuf, &hdr.LogStartOffset); - - rd_kafka_buf_read_i32(rkbuf, &AbortedTxnCnt); - - if (rkb->rkb_rk->rk_conf.isolation_level == - RD_KAFKA_READ_UNCOMMITTED) { - - if (unlikely(AbortedTxnCnt > 0)) { - rd_rkb_log( - rkb, LOG_ERR, "FETCH", - "%.*s [%" PRId32 - "]: " - "%" PRId32 - " aborted " - "transaction(s) " - "encountered in " - "READ_UNCOMMITTED " - "fetch response: " - "ignoring.", - RD_KAFKAP_STR_PR(&topic), - hdr.Partition, - AbortedTxnCnt); - - rd_kafka_buf_skip( - rkbuf, - AbortedTxnCnt * (8 + 8)); - } - } else { - /* Older brokers may return LSO -1, - * in which case we use the HWM. */ - if (hdr.LastStableOffset >= 0) - end_offset = - hdr.LastStableOffset; - - if (AbortedTxnCnt > 0) { - int k; - - if (unlikely(AbortedTxnCnt > - 1000000)) - rd_kafka_buf_parse_fail( - rkbuf, - "%.*s [%" PRId32 - "]: " - "invalid " - "AbortedTxnCnt " - "%" PRId32, - RD_KAFKAP_STR_PR( - &topic), - hdr.Partition, - AbortedTxnCnt); - - aborted_txns = - rd_kafka_aborted_txns_new( - AbortedTxnCnt); - for (k = 0; k < AbortedTxnCnt; - k++) { - int64_t PID; - int64_t FirstOffset; - rd_kafka_buf_read_i64( - rkbuf, &PID); - rd_kafka_buf_read_i64( - rkbuf, - &FirstOffset); - rd_kafka_aborted_txns_add( - aborted_txns, PID, - FirstOffset); - } - rd_kafka_aborted_txns_sort( - aborted_txns); - } - } - } - - if (rd_kafka_buf_ApiVersion(request) >= 11) - rd_kafka_buf_read_i32( - rkbuf, &hdr.PreferredReadReplica); - else - hdr.PreferredReadReplica = -1; - - rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSetSize); - - if (unlikely(hdr.MessageSetSize < 0)) - rd_kafka_buf_parse_fail( - rkbuf, - "%.*s [%" PRId32 - "]: " - "invalid MessageSetSize %" PRId32, - RD_KAFKAP_STR_PR(&topic), hdr.Partition, - hdr.MessageSetSize); - - /* Look up topic+partition */ - if (likely(rkt != NULL)) { - rd_kafka_topic_rdlock(rkt); - rktp = rd_kafka_toppar_get(rkt, hdr.Partition, - 0 /*no ua-on-miss*/); - rd_kafka_topic_rdunlock(rkt); - } - - if (unlikely(!rkt || !rktp)) { - rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC", - "Received Fetch response " - "(error %hu) for unknown topic " - "%.*s [%" PRId32 "]: ignoring", - hdr.ErrorCode, - RD_KAFKAP_STR_PR(&topic), - hdr.Partition); - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - if (aborted_txns) - rd_kafka_aborted_txns_destroy( - aborted_txns); - continue; - } - - rd_kafka_toppar_lock(rktp); - rktp->rktp_lo_offset = hdr.LogStartOffset; - rktp->rktp_hi_offset = hdr.HighwaterMarkOffset; - /* Let the LastStable offset be the effective - * end_offset based on protocol version, that is: - * if connected to a broker that does not support - * LastStableOffset we use the HighwaterMarkOffset. */ - rktp->rktp_ls_offset = end_offset; - rd_kafka_toppar_unlock(rktp); - - if (hdr.PreferredReadReplica != -1) { - - rd_kafka_fetch_preferred_replica_handle( - rktp, rkbuf, rkb, hdr.PreferredReadReplica); - - if (unlikely(hdr.MessageSetSize != 0)) { - rd_rkb_log( - rkb, LOG_WARNING, "FETCH", - "%.*s [%" PRId32 - "]: Fetch " - "response has both " - "preferred read replica " - "and non-zero message set " - "size: %" PRId32 - ": " - "skipping messages", - RD_KAFKAP_STR_PR( - rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - hdr.MessageSetSize); - rd_kafka_buf_skip(rkbuf, - hdr.MessageSetSize); - } - - if (aborted_txns) - rd_kafka_aborted_txns_destroy( - aborted_txns); - rd_kafka_toppar_destroy(rktp); /* from get */ - continue; - } - - rd_kafka_toppar_lock(rktp); - - /* Make sure toppar hasn't moved to another broker - * during the lifetime of the request. 
*/ - if (unlikely(rktp->rktp_broker != rkb)) { - rd_kafka_toppar_unlock(rktp); - rd_rkb_dbg(rkb, MSG, "FETCH", - "%.*s [%" PRId32 - "]: " - "partition broker has changed: " - "discarding fetch response", - RD_KAFKAP_STR_PR(&topic), - hdr.Partition); - rd_kafka_toppar_destroy(rktp); /* from get */ - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - if (aborted_txns) - rd_kafka_aborted_txns_destroy( - aborted_txns); - continue; - } - fetch_version = rktp->rktp_fetch_version; - rd_kafka_toppar_unlock(rktp); - - /* Check if this Fetch is for an outdated fetch version, - * or the original rktp was removed and a new one - * created (due to partition count decreasing and - * then increasing again, which can happen in - * desynchronized clusters): if so ignore it. */ - tver_skel.rktp = rktp; - tver = - rd_list_find(request->rkbuf_rktp_vers, &tver_skel, - rd_kafka_toppar_ver_cmp); - rd_kafka_assert(NULL, tver); - if (tver->rktp != rktp || - tver->version < fetch_version) { - rd_rkb_dbg(rkb, MSG, "DROP", - "%s [%" PRId32 - "]: " - "dropping outdated fetch response " - "(v%d < %d or old rktp)", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, tver->version, - fetch_version); - rd_atomic64_add(&rktp->rktp_c.rx_ver_drops, 1); - rd_kafka_toppar_destroy(rktp); /* from get */ - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - if (aborted_txns) - rd_kafka_aborted_txns_destroy( - aborted_txns); - continue; - } - - rd_rkb_dbg( - rkb, MSG, "FETCH", - "Topic %.*s [%" PRId32 - "] MessageSet " - "size %" PRId32 - ", error \"%s\", " - "MaxOffset %" PRId64 - ", " - "LSO %" PRId64 - ", " - "Ver %" PRId32 "/%" PRId32, - RD_KAFKAP_STR_PR(&topic), hdr.Partition, - hdr.MessageSetSize, rd_kafka_err2str(hdr.ErrorCode), - hdr.HighwaterMarkOffset, hdr.LastStableOffset, - tver->version, fetch_version); - - /* If this is the last message of the queue, - * signal EOF back to the application. */ - if (end_offset == rktp->rktp_offsets.fetch_offset && - rktp->rktp_offsets.eof_offset != - rktp->rktp_offsets.fetch_offset) { - hdr.ErrorCode = - RD_KAFKA_RESP_ERR__PARTITION_EOF; - rktp->rktp_offsets.eof_offset = - rktp->rktp_offsets.fetch_offset; - } - - if (unlikely(hdr.ErrorCode != - RD_KAFKA_RESP_ERR_NO_ERROR)) { - /* Handle partition-level errors. */ - rd_kafka_fetch_reply_handle_partition_error( - rkb, rktp, tver, hdr.ErrorCode, - hdr.HighwaterMarkOffset); - - rd_kafka_toppar_destroy(rktp); /* from get()*/ - - rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); - - if (aborted_txns) - rd_kafka_aborted_txns_destroy( - aborted_txns); - continue; - } - - /* No error, clear any previous fetch error. 
*/ - rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR; - - if (unlikely(hdr.MessageSetSize <= 0)) { - rd_kafka_toppar_destroy(rktp); /*from get()*/ - if (aborted_txns) - rd_kafka_aborted_txns_destroy( - aborted_txns); - continue; - } - - /** - * Parse MessageSet - */ - if (!rd_slice_narrow_relative( - &rkbuf->rkbuf_reader, &save_slice, - (size_t)hdr.MessageSetSize)) - rd_kafka_buf_check_len(rkbuf, - hdr.MessageSetSize); - - /* Parse messages */ - err = rd_kafka_msgset_parse(rkbuf, request, rktp, - aborted_txns, tver); - - if (aborted_txns) - rd_kafka_aborted_txns_destroy(aborted_txns); - - rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice); - /* Continue with next partition regardless of - * parse errors (which are partition-specific) */ - - /* On error: back off the fetcher for this partition */ - if (unlikely(err)) - rd_kafka_toppar_fetch_backoff(rkb, rktp, err); - - rd_kafka_toppar_destroy(rktp); /* from get */ - } - - if (rkt) { - rd_kafka_topic_destroy0(rkt); - rkt = NULL; - } - } - - if (rd_kafka_buf_read_remain(rkbuf) != 0) { - rd_kafka_buf_parse_fail(rkbuf, - "Remaining data after message set " - "parse: %" PRIusz " bytes", - rd_kafka_buf_read_remain(rkbuf)); - RD_NOTREACHED(); - } - - return 0; - -err_parse: - if (rkt) - rd_kafka_topic_destroy0(rkt); - rd_rkb_dbg(rkb, MSG, "BADMSG", - "Bad message (Fetch v%d): " - "is broker.version.fallback incorrectly set?", - (int)request->rkbuf_reqhdr.ApiVersion); - return rkbuf->rkbuf_err; -} - - - -static void rd_kafka_broker_fetch_reply(rd_kafka_t *rk, - rd_kafka_broker_t *rkb, - rd_kafka_resp_err_t err, - rd_kafka_buf_t *reply, - rd_kafka_buf_t *request, - void *opaque) { - - if (err == RD_KAFKA_RESP_ERR__DESTROY) - return; /* Terminating */ - - rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0); - rkb->rkb_fetching = 0; - - /* Parse and handle the messages (unless the request errored) */ - if (!err && reply) - err = rd_kafka_fetch_reply_handle(rkb, reply, request); - - if (unlikely(err)) { - char tmp[128]; - - rd_rkb_dbg(rkb, MSG, "FETCH", "Fetch reply: %s", - rd_kafka_err2str(err)); - switch (err) { - case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: - case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: - case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: - case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: - /* Request metadata information update */ - rd_snprintf(tmp, sizeof(tmp), "FetchRequest failed: %s", - rd_kafka_err2str(err)); - rd_kafka_metadata_refresh_known_topics( - rkb->rkb_rk, NULL, rd_true /*force*/, tmp); - /* FALLTHRU */ - - case RD_KAFKA_RESP_ERR__TRANSPORT: - case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: - case RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: - /* The fetch is already intervalled from - * consumer_serve() so dont retry. */ - break; - - default: - break; - } - - rd_kafka_broker_fetch_backoff(rkb, err); - /* FALLTHRU */ - } -} - - - -/** - * Build and send a Fetch request message for all underflowed toppars - * for a specific broker. - */ -static int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now) { - rd_kafka_toppar_t *rktp; - rd_kafka_buf_t *rkbuf; - int cnt = 0; - size_t of_TopicArrayCnt = 0; - int TopicArrayCnt = 0; - size_t of_PartitionArrayCnt = 0; - int PartitionArrayCnt = 0; - rd_kafka_topic_t *rkt_last = NULL; - int16_t ApiVersion = 0; - - /* Create buffer and segments: - * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt - * N x topic name - * N x PartitionArrayCnt Partition FetchOffset MaxBytes - * where N = number of toppars. 
- * Since we dont keep track of the number of topics served by - * this broker, only the partition count, we do a worst-case calc - * when allocating and assume each partition is on its own topic - */ - - if (unlikely(rkb->rkb_active_toppar_cnt == 0)) - return 0; - - rkbuf = rd_kafka_buf_new_request( - rkb, RD_KAFKAP_Fetch, 1, - /* ReplicaId+MaxWaitTime+MinBytes+MaxBytes+IsolationLevel+ - * SessionId+Epoch+TopicCnt */ - 4 + 4 + 4 + 4 + 1 + 4 + 4 + 4 + - /* N x PartCnt+Partition+CurrentLeaderEpoch+FetchOffset+ - * LogStartOffset+MaxBytes+?TopicNameLen?*/ - (rkb->rkb_active_toppar_cnt * (4 + 4 + 4 + 8 + 8 + 4 + 40)) + - /* ForgottenTopicsCnt */ - 4 + - /* N x ForgottenTopicsData */ - 0); - - ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_Fetch, - 0, 11, NULL); - - if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2) - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, - RD_KAFKA_FEATURE_MSGVER2); - else if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1) - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, - RD_KAFKA_FEATURE_MSGVER1); - else if (rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME) - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, - RD_KAFKA_FEATURE_THROTTLETIME); - - - /* FetchRequest header */ - /* ReplicaId */ - rd_kafka_buf_write_i32(rkbuf, -1); - /* MaxWaitTime */ - rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms); - /* MinBytes */ - rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes); - - if (rd_kafka_buf_ApiVersion(rkbuf) >= 3) - /* MaxBytes */ - rd_kafka_buf_write_i32(rkbuf, - rkb->rkb_rk->rk_conf.fetch_max_bytes); - - if (rd_kafka_buf_ApiVersion(rkbuf) >= 4) - /* IsolationLevel */ - rd_kafka_buf_write_i8(rkbuf, - rkb->rkb_rk->rk_conf.isolation_level); - - if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) { - /* SessionId */ - rd_kafka_buf_write_i32(rkbuf, 0); - /* Epoch */ - rd_kafka_buf_write_i32(rkbuf, -1); - } - - /* Write zero TopicArrayCnt but store pointer for later update */ - of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); - - /* Prepare map for storing the fetch version for each partition, - * this will later be checked in Fetch response to purge outdated - * responses (e.g., after a seek). */ - rkbuf->rkbuf_rktp_vers = - rd_list_new(0, (void *)rd_kafka_toppar_ver_destroy); - rd_list_prealloc_elems(rkbuf->rkbuf_rktp_vers, - sizeof(struct rd_kafka_toppar_ver), - rkb->rkb_active_toppar_cnt, 0); - - /* Round-robin start of the list. 
*/ - rktp = rkb->rkb_active_toppar_next; - do { - struct rd_kafka_toppar_ver *tver; - - if (rkt_last != rktp->rktp_rkt) { - if (rkt_last != NULL) { - /* Update PartitionArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, - of_PartitionArrayCnt, - PartitionArrayCnt); - } - - /* Topic name */ - rd_kafka_buf_write_kstr(rkbuf, - rktp->rktp_rkt->rkt_topic); - TopicArrayCnt++; - rkt_last = rktp->rktp_rkt; - /* Partition count */ - of_PartitionArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); - PartitionArrayCnt = 0; - } - - PartitionArrayCnt++; - - /* Partition */ - rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition); - - if (rd_kafka_buf_ApiVersion(rkbuf) >= 9) - /* CurrentLeaderEpoch */ - rd_kafka_buf_write_i32(rkbuf, -1); - - /* FetchOffset */ - rd_kafka_buf_write_i64(rkbuf, rktp->rktp_offsets.fetch_offset); - - if (rd_kafka_buf_ApiVersion(rkbuf) >= 5) - /* LogStartOffset - only used by follower replica */ - rd_kafka_buf_write_i64(rkbuf, -1); - - /* MaxBytes */ - rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes); - - rd_rkb_dbg(rkb, FETCH, "FETCH", - "Fetch topic %.*s [%" PRId32 "] at offset %" PRId64 - " (v%d)", - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - rktp->rktp_offsets.fetch_offset, - rktp->rktp_fetch_version); - - /* We must have a valid fetch offset when we get here */ - rd_dassert(rktp->rktp_offsets.fetch_offset >= 0); - - /* Add toppar + op version mapping. */ - tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL); - tver->rktp = rd_kafka_toppar_keep(rktp); - tver->version = rktp->rktp_fetch_version; - - cnt++; - } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, - rktp_activelink)) != - rkb->rkb_active_toppar_next); - - /* Update next toppar to fetch in round-robin list. */ - rd_kafka_broker_active_toppar_next( - rkb, rktp ? CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, - rktp_activelink) - : NULL); - - rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", cnt, - rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt); - if (!cnt) { - rd_kafka_buf_destroy(rkbuf); - return cnt; - } - - if (rkt_last != NULL) { - /* Update last topic's PartitionArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, of_PartitionArrayCnt, - PartitionArrayCnt); - } - - /* Update TopicArrayCnt */ - rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, TopicArrayCnt); - - - if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) - /* Length of the ForgottenTopics list (KIP-227). Broker - * use only - not used by the consumer. */ - rd_kafka_buf_write_i32(rkbuf, 0); - - if (rd_kafka_buf_ApiVersion(rkbuf) >= 11) - /* RackId */ - rd_kafka_buf_write_kstr(rkbuf, - rkb->rkb_rk->rk_conf.client_rack); - - /* Consider Fetch requests blocking if fetch.wait.max.ms >= 1s */ - if (rkb->rkb_rk->rk_conf.fetch_wait_max_ms >= 1000) - rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; - - /* Use configured timeout */ - rd_kafka_buf_set_timeout(rkbuf, - rkb->rkb_rk->rk_conf.socket_timeout_ms + - rkb->rkb_rk->rk_conf.fetch_wait_max_ms, - now); - - /* Sort toppar versions for quicker lookups in Fetch response. 
*/ - rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp); - - rkb->rkb_fetching = 1; - rd_kafka_broker_buf_enq1(rkb, rkbuf, rd_kafka_broker_fetch_reply, NULL); - - return cnt; -} - - - -/** * Consumer serving */ static void rd_kafka_broker_consumer_serve(rd_kafka_broker_t *rkb, @@ -5702,6 +4854,12 @@ rkb->rkb_nodeid); } + /* Call on_broker_state_change interceptors */ + rd_kafka_interceptors_on_broker_state_change( + rk, rkb->rkb_nodeid, rd_kafka_secproto_names[rkb->rkb_proto], + rkb->rkb_origname, rkb->rkb_port, + rd_kafka_broker_state_names[rkb->rkb_state]); + rd_kafka_broker_unlock(rkb); /* Add broker state monitor for the coordinator request to use. @@ -6676,8 +5834,6 @@ rd_kafka_broker_destroy(rkb); } - - /** * @name Unit tests * @{ diff -Nru librdkafka-1.9.2/src/rdkafka_broker.h librdkafka-2.0.2/src/rdkafka_broker.h --- librdkafka-1.9.2/src/rdkafka_broker.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_broker.h 2023-01-20 09:14:36.000000000 +0000 @@ -452,6 +452,9 @@ int state, rd_kafka_enq_once_t *eonce); +rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk, + rd_kafka_enq_once_t *eonce); + rd_kafka_broker_t * rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout); rd_kafka_broker_t *rd_kafka_broker_controller_async(rd_kafka_t *rk, diff -Nru librdkafka-1.9.2/src/rdkafka.c librdkafka-2.0.2/src/rdkafka.c --- librdkafka-1.9.2/src/rdkafka.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka.c 2023-01-20 09:14:36.000000000 +0000 @@ -957,6 +957,7 @@ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying SSL CTX"); rd_kafka_ssl_ctx_term(rk); } + rd_list_destroy(&rk->rk_conf.ssl.loaded_providers); #endif /* It is not safe to log after this point. */ @@ -994,6 +995,7 @@ rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf); rd_list_destroy(&rk->rk_broker_by_id); + mtx_destroy(&rk->rk_conf.sasl.lock); rwlock_destroy(&rk->rk_lock); rd_free(rk); @@ -2204,6 +2206,7 @@ rd_kafka_interceptors_on_new(rk, &rk->rk_conf); rwlock_init(&rk->rk_lock); + mtx_init(&rk->rk_conf.sasl.lock, mtx_plain); mtx_init(&rk->rk_internal_rkb_lock, mtx_plain); cnd_init(&rk->rk_broker_state_change_cnd); @@ -3364,7 +3367,7 @@ * processing the op. 
*/ rko->rko_u.offset_fetch.partitions = rd_kafka_topic_partition_list_copy(partitions); - rko->rko_u.offset_fetch.require_stable = + rko->rko_u.offset_fetch.require_stable_offsets = rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; rko->rko_u.offset_fetch.do_free = 1; @@ -4545,6 +4548,28 @@ int grplist_size; }; +static const char *rd_kafka_consumer_group_state_names[] = { + "Unknown", "PreparingRebalance", "CompletingRebalance", "Stable", "Dead", + "Empty"}; + +const char * +rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state) { + if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) + return NULL; + return rd_kafka_consumer_group_state_names[state]; +} + +rd_kafka_consumer_group_state_t +rd_kafka_consumer_group_state_code(const char *name) { + size_t i; + for (i = 0; i < RD_KAFKA_CONSUMER_GROUP_STATE__CNT; i++) { + if (!rd_strcasecmp(rd_kafka_consumer_group_state_names[i], + name)) + return i; + } + return RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN; +} + static void rd_kafka_DescribeGroups_resp_cb(rd_kafka_t *rk, rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, @@ -4725,10 +4750,18 @@ } if (i > 0) { + rd_kafka_error_t *error; + state->wait_cnt++; - rd_kafka_DescribeGroupsRequest( - rkb, (const char **)grps, i, RD_KAFKA_REPLYQ(state->q, 0), + error = rd_kafka_DescribeGroupsRequest( + rkb, 0, grps, i, RD_KAFKA_REPLYQ(state->q, 0), rd_kafka_DescribeGroups_resp_cb, state); + if (error) { + rd_kafka_DescribeGroups_resp_cb( + rk, rkb, rd_kafka_error_code(error), reply, request, + opaque); + rd_kafka_error_destroy(error); + } while (i-- > 0) rd_free(grps[i]); @@ -4789,6 +4822,7 @@ /* Query each broker for its list of groups */ rd_kafka_rdlock(rk); TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_kafka_error_t *error; rd_kafka_broker_lock(rkb); if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb)) { rd_kafka_broker_unlock(rkb); @@ -4798,8 +4832,15 @@ state.wait_cnt++; rkb_cnt++; - rd_kafka_ListGroupsRequest(rkb, RD_KAFKA_REPLYQ(state.q, 0), - rd_kafka_ListGroups_resp_cb, &state); + error = rd_kafka_ListGroupsRequest( + rkb, 0, NULL, 0, RD_KAFKA_REPLYQ(state.q, 0), + rd_kafka_ListGroups_resp_cb, &state); + if (error) { + rd_kafka_ListGroups_resp_cb(rk, rkb, + rd_kafka_error_code(error), + NULL, NULL, &state); + rd_kafka_error_destroy(error); + } } rd_kafka_rdunlock(rk); diff -Nru librdkafka-1.9.2/src/rdkafka_cert.c librdkafka-2.0.2/src/rdkafka_cert.c --- librdkafka-1.9.2/src/rdkafka_cert.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_cert.c 2023-01-20 09:14:36.000000000 +0000 @@ -95,8 +95,10 @@ return src; } + +#if OPENSSL_VERSION_NUMBER < 0x30000000 /** - * @brief Print the OpenSSL error stack do stdout, for development use. + * @brief Print the OpenSSL error stack to stdout, for development use. 
*/ static RD_UNUSED void rd_kafka_print_ssl_errors(void) { unsigned long l; @@ -121,6 +123,8 @@ flags & ERR_TXT_STRING); } } +#endif + /** * @returns a cert structure with a copy of the memory in \p buffer on success, @@ -150,7 +154,7 @@ [RD_KAFKA_CERT_ENC_DER] = rd_true, [RD_KAFKA_CERT_ENC_PEM] = rd_true}, }; - const char *action = ""; + const char *action = "", *ssl_errstr = NULL, *extra = ""; BIO *bio; rd_kafka_cert_t *cert = NULL; PKCS12 *p12 = NULL; @@ -249,6 +253,8 @@ X509_free(x509); goto fail; } + + X509_free(x509); } break; case RD_KAFKA_CERT_ENC_PEM: { @@ -273,6 +279,7 @@ goto fail; } + X509_free(x509); cnt++; } @@ -397,10 +404,22 @@ return cert; fail: - rd_snprintf(errstr, errstr_size, "Failed to %s %s (encoding %s): %s", + ssl_errstr = rd_kafka_ssl_last_error_str(); + + /* OpenSSL 3.x does not provide obsolete ciphers out of the box, so + * let's try to identify such an error message and guide the user + * to what to do (set up a provider config file and point to it + * through the OPENSSL_CONF environment variable). + * We could call OSSL_PROVIDER_load("legacy") here, but that would be + * a non-obvious side-effect of calling this set function. */ + if (strstr(action, "parse") && strstr(ssl_errstr, "Algorithm")) + extra = + ": legacy ciphers may require loading OpenSSL's \"legacy\" " + "provider through an OPENSSL_CONF configuration file"; + + rd_snprintf(errstr, errstr_size, "Failed to %s %s (encoding %s): %s%s", action, rd_kafka_cert_type_names[type], - rd_kafka_cert_enc_names[encoding], - rd_kafka_ssl_last_error_str()); + rd_kafka_cert_enc_names[encoding], ssl_errstr, extra); if (cert) rd_kafka_cert_destroy(cert); diff -Nru librdkafka-1.9.2/src/rdkafka_cgrp.c librdkafka-2.0.2/src/rdkafka_cgrp.c --- librdkafka-1.9.2/src/rdkafka_cgrp.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_cgrp.c 2023-01-20 09:14:36.000000000 +0000 @@ -2949,8 +2949,8 @@ RD_KAFKA_OP_TYPE_ASSERT(rko_orig, RD_KAFKA_OP_OFFSET_COMMIT); - err = - rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf, request, offsets); + err = rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf, request, + offsets, rd_false); /* Suppress empty commit debug logs if allowed */ if (err != RD_KAFKA_RESP_ERR__NO_OFFSET || @@ -3091,6 +3091,7 @@ int r; rd_kafka_buf_t *rkbuf; rd_kafka_op_t *reply; + rd_kafka_consumer_group_metadata_t *cgmetadata; if (!(rko->rko_flags & RD_KAFKA_OP_F_REPROCESS)) { /* wait_commit_cnt has already been increased for @@ -3165,10 +3166,17 @@ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state], reason); + cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid( + rkcg->rkcg_rk->rk_conf.group_id_str, rkcg->rkcg_generation_id, + rkcg->rkcg_member_id->str, + rkcg->rkcg_rk->rk_conf.group_instance_id); + /* Send OffsetCommit */ - r = rd_kafka_OffsetCommitRequest( - rkcg->rkcg_coord, rkcg, offsets, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), - rd_kafka_cgrp_op_handle_OffsetCommit, rko, reason); + r = rd_kafka_OffsetCommitRequest(rkcg->rkcg_coord, cgmetadata, offsets, + RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), + rd_kafka_cgrp_op_handle_OffsetCommit, + rko, reason); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); /* Must have valid offsets to commit if we get here */ rd_kafka_assert(NULL, r != 0); @@ -4871,8 +4879,10 @@ } rd_kafka_OffsetFetchRequest( - rkcg->rkcg_coord, rko->rko_u.offset_fetch.partitions, - rko->rko_u.offset_fetch.require_stable, + rkcg->rkcg_coord, rk->rk_group_id->str, + rko->rko_u.offset_fetch.partitions, + rko->rko_u.offset_fetch.require_stable_offsets, + 0, /* Timeout */ 
RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), rd_kafka_op_handle_OffsetFetch, rko); rko = NULL; /* rko now owned by request */ diff -Nru librdkafka-1.9.2/src/rdkafka_cgrp.h librdkafka-2.0.2/src/rdkafka_cgrp.h --- librdkafka-1.9.2/src/rdkafka_cgrp.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_cgrp.h 2023-01-20 09:14:36.000000000 +0000 @@ -368,7 +368,6 @@ rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup( const rd_kafka_consumer_group_metadata_t *cgmetadata); - static RD_UNUSED const char * rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) { switch (protocol) { diff -Nru librdkafka-1.9.2/src/rdkafka_conf.c librdkafka-2.0.2/src/rdkafka_conf.c --- librdkafka-1.9.2/src/rdkafka_conf.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_conf.c 2023-01-20 09:14:36.000000000 +0000 @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012,2013 Magnus Edenhill + * Copyright (c) 2012-2022 Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -149,6 +149,20 @@ .unsupported = "OpenSSL >= 1.1.0 not available at build time" #endif +#if WITH_SSL_ENGINE +#define _UNSUPPORTED_SSL_ENGINE .unsupported = NULL +#else +#define _UNSUPPORTED_SSL_ENGINE \ + .unsupported = "OpenSSL >= 1.1.x not available at build time" +#endif + +#if OPENSSL_VERSION_NUMBER >= 0x30000000 && defined(WITH_SSL) +#define _UNSUPPORTED_SSL_3 .unsupported = NULL +#else +#define _UNSUPPORTED_SSL_3 \ + .unsupported = "OpenSSL >= 3.0.0 not available at build time" +#endif + #if WITH_ZLIB #define _UNSUPPORTED_ZLIB .unsupported = NULL @@ -669,6 +683,8 @@ rd_kafka_open_cb_generic #endif }, + {_RK_GLOBAL, "resolve_cb", _RK_C_PTR, _RK(resolve_cb), + "Address resolution callback (set with rd_kafka_conf_set_resolve_cb())."}, {_RK_GLOBAL, "opaque", _RK_C_PTR, _RK(opaque), "Application opaque (set with rd_kafka_conf_set_opaque())"}, {_RK_GLOBAL, "default_topic_conf", _RK_C_PTR, _RK(topic_conf), @@ -718,6 +734,20 @@ "Any other value >= 0.10, such as 0.10.2.1, " "enables ApiVersionRequests.", .sdef = "0.10.0", .validate = rd_kafka_conf_validate_broker_version}, + {_RK_GLOBAL, "allow.auto.create.topics", _RK_C_BOOL, + _RK(allow_auto_create_topics), + "Allow automatic topic creation on the broker when subscribing to " + "or assigning non-existent topics. " + "The broker must also be configured with " + "`auto.create.topics.enable=true` for this configuration to " + "take effect. " + "Note: the default value (true) for the producer is " + "different from the default value (false) for the consumer. " + "Further, the consumer default value is different from the Java " + "consumer (true), and this property is not supported by the Java " + "producer. Requires broker version >= 0.11.0.0, for older broker " + "versions only the broker configuration applies.", + 0, 1, 0}, /* Security related global properties */ {_RK_GLOBAL | _RK_HIGH, "security.protocol", _RK_C_S2I, @@ -821,17 +851,24 @@ {_RK_GLOBAL | _RK_SENSITIVE, "ssl.keystore.password", _RK_C_STR, _RK(ssl.keystore_password), "Client's keystore (PKCS#12) password.", _UNSUPPORTED_SSL}, - {_RK_GLOBAL, "ssl.engine.location", _RK_C_STR, _RK(ssl.engine_location), - "Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.", - _UNSUPPORTED_OPENSSL_1_1_0}, + {_RK_GLOBAL, "ssl.providers", _RK_C_STR, _RK(ssl.providers), + "Comma-separated list of OpenSSL 3.0.x implementation providers. 
" + "E.g., \"default,legacy\".", + _UNSUPPORTED_SSL_3}, + {_RK_GLOBAL | _RK_DEPRECATED, "ssl.engine.location", _RK_C_STR, + _RK(ssl.engine_location), + "Path to OpenSSL engine library. OpenSSL >= 1.1.x required. " + "DEPRECATED: OpenSSL engine support is deprecated and should be " + "replaced by OpenSSL 3 providers.", + _UNSUPPORTED_SSL_ENGINE}, {_RK_GLOBAL, "ssl.engine.id", _RK_C_STR, _RK(ssl.engine_id), "OpenSSL engine id is the name used for loading engine.", - .sdef = "dynamic", _UNSUPPORTED_OPENSSL_1_1_0}, + .sdef = "dynamic", _UNSUPPORTED_SSL_ENGINE}, {_RK_GLOBAL, "ssl_engine_callback_data", _RK_C_PTR, _RK(ssl.engine_callback_data), "OpenSSL engine callback data (set " "with rd_kafka_conf_set_engine_callback_data()).", - _UNSUPPORTED_OPENSSL_1_1_0}, + _UNSUPPORTED_SSL_ENGINE}, {_RK_GLOBAL, "enable.ssl.certificate.verification", _RK_C_BOOL, _RK(ssl.enable_verify), "Enable OpenSSL's builtin broker (server) certificate verification. " @@ -846,7 +883,7 @@ "specified in RFC2818. " "none - No endpoint verification. " "OpenSSL >= 1.0.2 required.", - .vdef = RD_KAFKA_SSL_ENDPOINT_ID_NONE, + .vdef = RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, .s2i = {{RD_KAFKA_SSL_ENDPOINT_ID_NONE, "none"}, {RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, "https"}}, _UNSUPPORTED_OPENSSL_1_0_2}, @@ -1233,18 +1270,6 @@ "on-disk corruption to the messages occurred. This check comes " "at slightly increased CPU usage.", 0, 1, 0}, - {_RK_GLOBAL | _RK_CONSUMER, "allow.auto.create.topics", _RK_C_BOOL, - _RK(allow_auto_create_topics), - "Allow automatic topic creation on the broker when subscribing to " - "or assigning non-existent topics. " - "The broker must also be configured with " - "`auto.create.topics.enable=true` for this configuraiton to " - "take effect. " - "Note: The default value (false) is different from the " - "Java consumer (true). " - "Requires broker version >= 0.11.0.0, for older broker versions " - "only the broker configuration applies.", - 0, 1, 0}, {_RK_GLOBAL, "client.rack", _RK_C_KSTR, _RK(client_rack), "A rack identifier for this client. This can be any string value " "which indicates where this client is physically located. It " @@ -1307,8 +1332,9 @@ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.messages", _RK_C_INT, _RK(queue_buffering_max_msgs), "Maximum number of messages allowed on the producer queue. " - "This queue is shared by all topics and partitions.", - 1, 10000000, 100000}, + "This queue is shared by all topics and partitions. A value of 0 disables " + "this limit.", + 0, INT_MAX, 100000}, {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.kbytes", _RK_C_INT, _RK(queue_buffering_max_kbytes), "Maximum total message size sum allowed on the producer queue. 
" @@ -2764,6 +2790,16 @@ } #endif +void rd_kafka_conf_set_resolve_cb( + rd_kafka_conf_t *conf, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque)) { + rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "resolve_cb", + resolve_cb); +} rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb( rd_kafka_conf_t *conf, diff -Nru librdkafka-1.9.2/src/rdkafka_conf.h librdkafka-2.0.2/src/rdkafka_conf.h --- librdkafka-1.9.2/src/rdkafka_conf.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_conf.h 2023-01-20 09:14:36.000000000 +0000 @@ -32,7 +32,10 @@ #include "rdlist.h" #include "rdkafka_cert.h" -#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 +#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 && \ + !defined(OPENSSL_IS_BORINGSSL) +#define WITH_SSL_ENGINE 1 +/* Deprecated in OpenSSL 3 */ #include #endif /* WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 */ @@ -157,7 +160,7 @@ /* Increase in steps of 64 as needed. * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */ -#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 30) +#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 33) /** * @struct rd_kafka_anyconf_t @@ -248,6 +251,8 @@ char *engine_location; char *engine_id; void *engine_callback_data; + char *providers; + rd_list_t loaded_providers; /**< (SSL_PROVIDER*) */ char *keystore_location; char *keystore_password; int endpoint_identification; @@ -272,6 +277,9 @@ char *kinit_cmd; char *keytab; int relogin_min_time; + /** Protects .username and .password access after client + * instance has been created (see sasl_set_credentials()). */ + mtx_t lock; char *username; char *password; #if WITH_SASL_SCRAM @@ -310,20 +318,21 @@ /* Interceptors */ struct { /* rd_kafka_interceptor_method_t lists */ - rd_list_t on_conf_set; /* on_conf_set interceptors - * (not copied on conf_dup()) */ - rd_list_t on_conf_dup; /* .. (not copied) */ - rd_list_t on_conf_destroy; /* .. (not copied) */ - rd_list_t on_new; /* .. (copied) */ - rd_list_t on_destroy; /* .. (copied) */ - rd_list_t on_send; /* .. (copied) */ - rd_list_t on_acknowledgement; /* .. (copied) */ - rd_list_t on_consume; /* .. (copied) */ - rd_list_t on_commit; /* .. (copied) */ - rd_list_t on_request_sent; /* .. (copied) */ - rd_list_t on_response_received; /* .. (copied) */ - rd_list_t on_thread_start; /* .. (copied) */ - rd_list_t on_thread_exit; /* .. (copied) */ + rd_list_t on_conf_set; /* on_conf_set interceptors + * (not copied on conf_dup()) */ + rd_list_t on_conf_dup; /* .. (not copied) */ + rd_list_t on_conf_destroy; /* .. (not copied) */ + rd_list_t on_new; /* .. (copied) */ + rd_list_t on_destroy; /* .. (copied) */ + rd_list_t on_send; /* .. (copied) */ + rd_list_t on_acknowledgement; /* .. (copied) */ + rd_list_t on_consume; /* .. (copied) */ + rd_list_t on_commit; /* .. (copied) */ + rd_list_t on_request_sent; /* .. (copied) */ + rd_list_t on_response_received; /* .. (copied) */ + rd_list_t on_thread_start; /* .. (copied) */ + rd_list_t on_thread_exit; /* .. (copied) */ + rd_list_t on_broker_state_change; /* .. 
(copied) */ /* rd_strtup_t list */ rd_list_t config; /* Configuration name=val's @@ -495,6 +504,13 @@ mode_t mode, void *opaque); + /* Address resolution callback */ + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque); + /* Background queue event callback */ void (*background_event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, diff -Nru librdkafka-1.9.2/src/rdkafka_coord.c librdkafka-2.0.2/src/rdkafka_coord.c --- librdkafka-1.9.2/src/rdkafka_coord.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_coord.c 2023-01-20 09:14:36.000000000 +0000 @@ -196,6 +196,14 @@ static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq); +/** + * @brief Timer callback for delayed coord requests. + */ +static void rd_kafka_coord_req_tmr_cb(rd_kafka_timers_t *rkts, void *arg) { + rd_kafka_coord_req_t *creq = arg; + + rd_kafka_coord_req_fsm(rkts->rkts_rk, creq); +} /** @@ -207,6 +215,10 @@ * These steps may be performed by this function, or asynchronously * at a later time. * + * @param delay_ms If non-zero, delay scheduling of the coord request + * for this long. The passed \p timeout_ms is automatically + * adjusted to + \p delay_ms. + * * Response, or error, is sent on \p replyq with callback \p rkbuf_cb. * * @locality rdkafka main thread @@ -217,6 +229,7 @@ const char *coordkey, rd_kafka_send_req_cb_t *send_req_cb, rd_kafka_op_t *rko, + int delay_ms, int timeout_ms, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, @@ -226,7 +239,7 @@ creq = rd_calloc(1, sizeof(*creq)); creq->creq_coordtype = coordtype; creq->creq_coordkey = rd_strdup(coordkey); - creq->creq_ts_timeout = rd_timeout_init(timeout_ms); + creq->creq_ts_timeout = rd_timeout_init(delay_ms + timeout_ms); creq->creq_send_req_cb = send_req_cb; creq->creq_rko = rko; creq->creq_replyq = replyq; @@ -238,7 +251,12 @@ TAILQ_INSERT_TAIL(&rk->rk_coord_reqs, creq, creq_link); - rd_kafka_coord_req_fsm(rk, creq); + if (delay_ms) + rd_kafka_timer_start_oneshot(&rk->rk_timers, &creq->creq_tmr, + rd_true, (rd_ts_t)delay_ms * 1000, + rd_kafka_coord_req_tmr_cb, creq); + else + rd_kafka_coord_req_fsm(rk, creq); } @@ -263,6 +281,9 @@ rd_dassert(!creq->creq_done); TAILQ_REMOVE(&rk->rk_coord_reqs, creq, creq_link); creq->creq_done = rd_true; + + rd_kafka_timer_stop(&rk->rk_timers, &creq->creq_tmr, + RD_DO_LOCK); } if (--creq->creq_refcnt > 0) @@ -403,9 +424,6 @@ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, - RD_KAFKA_ERR_ACTION_RETRY, - RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, - RD_KAFKA_ERR_ACTION_END); if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) { @@ -448,6 +466,12 @@ return; } + /* Do nothing if creq is delayed and the delay time hasn't expired yet. + * We will be called again by the timer once it expires.*/ + if (rd_kafka_timer_next(&rk->rk_timers, &creq->creq_tmr, RD_DO_LOCK) > + 0) + return; + /* Check cache first */ rkb = rd_kafka_coord_cache_get( &rk->rk_coord_cache, creq->creq_coordtype, creq->creq_coordkey); diff -Nru librdkafka-1.9.2/src/rdkafka_coord.h librdkafka-2.0.2/src/rdkafka_coord.h --- librdkafka-1.9.2/src/rdkafka_coord.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_coord.h 2023-01-20 09:14:36.000000000 +0000 @@ -82,6 +82,7 @@ rd_kafka_op_t *creq_rko; /**< Requester's rko that is * provided to creq_send_req_cb * (optional). */ + rd_kafka_timer_t creq_tmr; /**< Delay timer. */ rd_ts_t creq_ts_timeout; /**< Absolute timeout. 
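The resolve_cb member added to the configuration struct above is set through the new rd_kafka_conf_set_resolve_cb() declared further down in this diff (rdkafka.h). As a minimal sketch of the documented contract, resolving like getaddrinfo(3) and freeing the result when invoked with NULL node, service and hints, a pass-through resolver could look as follows; my_resolve_cb and make_conf are illustrative names, not part of the library:

#include <netdb.h>                 /* getaddrinfo(), freeaddrinfo() */
#include <librdkafka/rdkafka.h>

/* Pass-through resolver: defers to the libc resolver. A real callback
 * could rewrite `node` here, e.g. for split-horizon DNS. */
static int my_resolve_cb(const char *node, const char *service,
                         const struct addrinfo *hints,
                         struct addrinfo **res, void *opaque) {
        if (!node && !service && !hints) {
                /* Cleanup call: free a result returned earlier. */
                freeaddrinfo(*res);
                return 0;
        }
        return getaddrinfo(node, service, hints, res);
}

static rd_kafka_conf_t *make_conf(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_conf_set_resolve_cb(conf, my_resolve_cb);
        return conf;
}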
* Will fail with an error * code pertaining to the @@ -118,6 +119,7 @@ const char *coordkey, rd_kafka_send_req_cb_t *send_req_cb, rd_kafka_op_t *rko, + int delay_ms, int timeout_ms, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, diff -Nru librdkafka-1.9.2/src/rdkafka_error.c librdkafka-2.0.2/src/rdkafka_error.c --- librdkafka-1.9.2/src/rdkafka_error.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_error.c 2023-01-20 09:14:36.000000000 +0000 @@ -80,7 +80,7 @@ ssize_t strsz = 0; if (src->errstr) { - strsz = strlen(src->errstr); + strsz = strlen(src->errstr) + 1; } error = rd_malloc(sizeof(*error) + strsz); @@ -99,6 +99,14 @@ return error; } +/** + * @brief Same as rd_kafka_error_copy() but suitable for + * rd_list_copy(). The \p opaque is ignored. + */ +void *rd_kafka_error_copy_opaque(const void *error, void *opaque) { + return rd_kafka_error_copy(error); +} + rd_kafka_error_t * rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...) { diff -Nru librdkafka-1.9.2/src/rdkafka_error.h librdkafka-2.0.2/src/rdkafka_error.h --- librdkafka-1.9.2/src/rdkafka_error.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_error.h 2023-01-20 09:14:36.000000000 +0000 @@ -55,6 +55,8 @@ rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src); +void *rd_kafka_error_copy_opaque(const void *error, void *opaque); + void rd_kafka_error_set_fatal(rd_kafka_error_t *error); void rd_kafka_error_set_retriable(rd_kafka_error_t *error); void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error); diff -Nru librdkafka-1.9.2/src/rdkafka_event.c librdkafka-2.0.2/src/rdkafka_event.c --- librdkafka-1.9.2/src/rdkafka_event.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_event.c 2023-01-20 09:14:36.000000000 +0000 @@ -64,6 +64,10 @@ return "DescribeConfigsResult"; case RD_KAFKA_EVENT_DELETERECORDS_RESULT: return "DeleteRecordsResult"; + case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT: + return "ListConsumerGroupsResult"; + case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT: + return "DescribeConsumerGroupsResult"; case RD_KAFKA_EVENT_DELETEGROUPS_RESULT: return "DeleteGroupsResult"; case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT: @@ -74,6 +78,10 @@ return "DescribeAclsResult"; case RD_KAFKA_EVENT_DELETEACLS_RESULT: return "DeleteAclsResult"; + case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT: + return "AlterConsumerGroupOffsetsResult"; + case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT: + return "ListConsumerGroupOffsetsResult"; case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: return "SaslOAuthBearerTokenRefresh"; default: @@ -338,6 +346,24 @@ return (const rd_kafka_DeleteRecords_result_t *)rkev; } +const rd_kafka_ListConsumerGroups_result_t * +rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) + return NULL; + else + return (const rd_kafka_ListConsumerGroups_result_t *)rkev; +} + +const rd_kafka_DescribeConsumerGroups_result_t * +rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) + return NULL; + else + return (const rd_kafka_DescribeConsumerGroups_result_t *)rkev; +} + const rd_kafka_DeleteGroups_result_t * rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev) { if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEGROUPS_RESULT) @@ -379,3 +405,22 @@ else return (const rd_kafka_DeleteAcls_result_t *)rkev; } + +const 
rd_kafka_AlterConsumerGroupOffsets_result_t * +rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT) + return NULL; + else + return ( + const rd_kafka_AlterConsumerGroupOffsets_result_t *)rkev; +} + +const rd_kafka_ListConsumerGroupOffsets_result_t * +rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev) { + if (!rkev || + rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT) + return NULL; + else + return (const rd_kafka_ListConsumerGroupOffsets_result_t *)rkev; +} diff -Nru librdkafka-1.9.2/src/rdkafka_event.h librdkafka-2.0.2/src/rdkafka_event.h --- librdkafka-1.9.2/src/rdkafka_event.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_event.h 2023-01-20 09:14:36.000000000 +0000 @@ -100,11 +100,15 @@ case RD_KAFKA_EVENT_ALTERCONFIGS_RESULT: case RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT: case RD_KAFKA_EVENT_DELETERECORDS_RESULT: + case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT: + case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT: case RD_KAFKA_EVENT_DELETEGROUPS_RESULT: case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT: case RD_KAFKA_EVENT_CREATEACLS_RESULT: case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT: case RD_KAFKA_EVENT_DELETEACLS_RESULT: + case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT: + case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT: case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: return 1; diff -Nru librdkafka-1.9.2/src/rdkafka_fetcher.c librdkafka-2.0.2/src/rdkafka_fetcher.c --- librdkafka-1.9.2/src/rdkafka_fetcher.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_fetcher.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,1080 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2022 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * @name Fetcher + * + */ + +#include "rdkafka_int.h" +#include "rdkafka_offset.h" +#include "rdkafka_msgset.h" +#include "rdkafka_fetcher.h" + + +/** + * Backoff the next Fetch request (due to error). 
+ */ +static void rd_kafka_broker_fetch_backoff(rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err) { + int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; + rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); + rd_rkb_dbg(rkb, FETCH, "BACKOFF", "Fetch backoff for %dms: %s", + backoff_ms, rd_kafka_err2str(err)); +} + +/** + * @brief Backoff the next Fetch for specific partition + */ +static void rd_kafka_toppar_fetch_backoff(rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + rd_kafka_resp_err_t err) { + int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms; + + /* Don't back off on reaching end of partition */ + if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF) + return; + + /* Certain errors that may require manual intervention should have + * a longer backoff time. */ + if (err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) + backoff_ms = RD_MAX(1000, backoff_ms * 10); + + rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000); + + rd_rkb_dbg(rkb, FETCH, "BACKOFF", + "%s [%" PRId32 "]: Fetch backoff for %dms%s%s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + backoff_ms, err ? ": " : "", + err ? rd_kafka_err2str(err) : ""); +} + + +/** + * @brief Handle preferred replica in fetch response. + * + * @locks rd_kafka_toppar_lock(rktp) and + * rd_kafka_rdlock(rk) must NOT be held. + * + * @locality broker thread + */ +static void rd_kafka_fetch_preferred_replica_handle(rd_kafka_toppar_t *rktp, + rd_kafka_buf_t *rkbuf, + rd_kafka_broker_t *rkb, + int32_t preferred_id) { + const rd_ts_t one_minute = 60 * 1000 * 1000; + const rd_ts_t five_seconds = 5 * 1000 * 1000; + rd_kafka_broker_t *preferred_rkb; + rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk; + rd_ts_t new_intvl = + rd_interval_immediate(&rktp->rktp_new_lease_intvl, one_minute, 0); + + if (new_intvl < 0) { + /* In lieu of KIP-320, the toppar is delegated back to + * the leader in the event of an offset out-of-range + * error (KIP-392 error case #4) because this scenario + * implies the preferred replica is out-of-sync. + * + * If program execution reaches here, the leader has + * relatively quickly instructed the client back to + * a preferred replica, quite possibly the same one + * as before (possibly resulting from stale metadata), + * so we back off the toppar to slow down potential + * back-and-forth. 
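Both backoff helpers above scale the existing fetch.error.backoff.ms setting, and the preferred-replica handling that follows is the consumer side of KIP-392 (fetch from the closest replica), which brokers can only apply when they know the consumer's rack, conveyed by the existing client.rack property that the FetchRequest built later in this file sends as RackId. A configuration sketch only, with placeholder values, return codes omitted, and conf/errstr assumed from ordinary client setup:

/* Pause failed Fetches a little longer (placeholder value). */
rd_kafka_conf_set(conf, "fetch.error.backoff.ms", "1000",
                  errstr, sizeof(errstr));
/* Advertise this consumer's rack so a cluster configured with a
 * rack-aware replica selector may serve Fetches from a nearby follower. */
rd_kafka_conf_set(conf, "client.rack", "rack-1",
                  errstr, sizeof(errstr));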
+ */ + + if (rd_interval_immediate(&rktp->rktp_new_lease_log_intvl, + one_minute, 0) > 0) + rd_rkb_log(rkb, LOG_NOTICE, "FETCH", + "%.*s [%" PRId32 + "]: preferred replica " + "(%" PRId32 + ") lease changing too quickly " + "(%" PRId64 + "s < 60s): possibly due to " + "unavailable replica or stale cluster " + "state: backing off next fetch", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, preferred_id, + (one_minute - -new_intvl) / (1000 * 1000)); + + rd_kafka_toppar_fetch_backoff(rkb, rktp, + RD_KAFKA_RESP_ERR_NO_ERROR); + } + + rd_kafka_rdlock(rk); + preferred_rkb = rd_kafka_broker_find_by_nodeid(rk, preferred_id); + rd_kafka_rdunlock(rk); + + if (preferred_rkb) { + rd_interval_reset_to_now(&rktp->rktp_lease_intvl, 0); + rd_kafka_toppar_lock(rktp); + rd_kafka_toppar_broker_update(rktp, preferred_id, preferred_rkb, + "preferred replica updated"); + rd_kafka_toppar_unlock(rktp); + rd_kafka_broker_destroy(preferred_rkb); + return; + } + + if (rd_interval_immediate(&rktp->rktp_metadata_intvl, five_seconds, 0) > + 0) { + rd_rkb_log(rkb, LOG_NOTICE, "FETCH", + "%.*s [%" PRId32 "]: preferred replica (%" PRId32 + ") " + "is unknown: refreshing metadata", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, preferred_id); + + rd_kafka_metadata_refresh_brokers( + rktp->rktp_rkt->rkt_rk, NULL, + "preferred replica unavailable"); + } + + rd_kafka_toppar_fetch_backoff(rkb, rktp, + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE); +} + + +/** + * @brief Handle partition-specific Fetch error. + */ +static void rd_kafka_fetch_reply_handle_partition_error( + rd_kafka_broker_t *rkb, + rd_kafka_toppar_t *rktp, + const struct rd_kafka_toppar_ver *tver, + rd_kafka_resp_err_t err, + int64_t HighwaterMarkOffset) { + + /* Some errors should be passed to the + * application while some handled by rdkafka */ + switch (err) { + /* Errors handled by rdkafka */ + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: + case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: + case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR: + case RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH: + /* Request metadata information update*/ + rd_kafka_toppar_leader_unavailable(rktp, "fetch", err); + break; + + case RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE: + /* Occurs when: + * - Msg exists on broker but + * offset > HWM, or: + * - HWM is >= offset, but msg not + * yet available at that offset + * (replica is out of sync). + * + * Handle by retrying FETCH (with backoff). 
+ */ + rd_rkb_dbg(rkb, MSG, "FETCH", + "Topic %s [%" PRId32 "]: Offset %" PRId64 + " not " + "available on broker %" PRId32 " (leader %" PRId32 + "): retrying", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rktp->rktp_offsets.fetch_offset, + rktp->rktp_broker_id, rktp->rktp_leader_id); + break; + + case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: { + int64_t err_offset; + + if (rktp->rktp_broker_id != rktp->rktp_leader_id && + rktp->rktp_offsets.fetch_offset > HighwaterMarkOffset) { + rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "FETCH", + "Topic %s [%" PRId32 "]: Offset %" PRId64 + " out of range (HighwaterMark %" PRId64 + " fetching from " + "broker %" PRId32 " (leader %" PRId32 + "): reverting to leader", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, + rktp->rktp_offsets.fetch_offset, + HighwaterMarkOffset, rktp->rktp_broker_id, + rktp->rktp_leader_id); + + /* Out of range error cannot be taken as definitive + * when fetching from follower. + * Revert back to the leader in lieu of KIP-320. + */ + rd_kafka_toppar_delegate_to_leader(rktp); + break; + } + + /* Application error */ + err_offset = rktp->rktp_offsets.fetch_offset; + rktp->rktp_offsets.fetch_offset = RD_KAFKA_OFFSET_INVALID; + rd_kafka_offset_reset(rktp, rd_kafka_broker_id(rkb), err_offset, + err, + "fetch failed due to requested offset " + "not available on the broker"); + } break; + + case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED: + /* If we're not authorized to access the + * topic mark it as errored to deny + * further Fetch requests. */ + if (rktp->rktp_last_error != err) { + rd_kafka_consumer_err( + rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_offset, + "Fetch from broker %" PRId32 " failed: %s", + rd_kafka_broker_id(rkb), rd_kafka_err2str(err)); + rktp->rktp_last_error = err; + } + break; + + + /* Application errors */ + case RD_KAFKA_RESP_ERR__PARTITION_EOF: + if (rkb->rkb_rk->rk_conf.enable_partition_eof) + rd_kafka_consumer_err(rktp->rktp_fetchq, + rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, + rktp->rktp_offsets.fetch_offset, + "Fetch from broker %" PRId32 + " reached end of " + "partition at offset %" PRId64 + " (HighwaterMark %" PRId64 ")", + rd_kafka_broker_id(rkb), + rktp->rktp_offsets.fetch_offset, + HighwaterMarkOffset); + break; + + case RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE: + default: /* and all other errors */ + rd_dassert(tver->version > 0); + rd_kafka_consumer_err( + rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err, + tver->version, NULL, rktp, rktp->rktp_offsets.fetch_offset, + "Fetch from broker %" PRId32 " failed: %s", + rd_kafka_broker_id(rkb), rd_kafka_err2str(err)); + break; + } + + /* Back off the next fetch for this partition */ + rd_kafka_toppar_fetch_backoff(rkb, rktp, err); +} + + + +/** + * @brief Per-partition FetchResponse parsing and handling. + * + * @returns an error on buffer parse failure, else RD_KAFKA_RESP_ERR_NO_ERROR. 
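Of the application-level errors above, RD_KAFKA_RESP_ERR__PARTITION_EOF is only forwarded when enable.partition.eof is configured. A small sketch of treating it as an informational event on the consuming side, with the poll interval and handling purely illustrative:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void poll_once(rd_kafka_t *rk) {
        rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
        if (!rkm)
                return;
        if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
                /* Reached the current end of a partition: not a failure. */
                fprintf(stderr, "EOF on %s [%d] at offset %lld\n",
                        rd_kafka_topic_name(rkm->rkt), (int)rkm->partition,
                        (long long)rkm->offset);
        else if (rkm->err)
                fprintf(stderr, "Consume error: %s\n",
                        rd_kafka_message_errstr(rkm));
        /* else: regular message in rkm->payload / rkm->len */
        rd_kafka_message_destroy(rkm);
}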
+ */ +static rd_kafka_resp_err_t +rd_kafka_fetch_reply_handle_partition(rd_kafka_broker_t *rkb, + const rd_kafkap_str_t *topic, + rd_kafka_topic_t *rkt /*possibly NULL*/, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + int16_t ErrorCode) { + const int log_decode_errors = LOG_ERR; + struct rd_kafka_toppar_ver *tver, tver_skel; + rd_kafka_toppar_t *rktp = NULL; + rd_kafka_aborted_txns_t *aborted_txns = NULL; + rd_slice_t save_slice; + int32_t fetch_version; + struct { + int32_t Partition; + int16_t ErrorCode; + int64_t HighwaterMarkOffset; + int64_t LastStableOffset; /* v4 */ + int64_t LogStartOffset; /* v5 */ + int32_t MessageSetSize; + int32_t PreferredReadReplica; /* v11 */ + } hdr; + rd_kafka_resp_err_t err; + int64_t end_offset; + + rd_kafka_buf_read_i32(rkbuf, &hdr.Partition); + rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode); + if (ErrorCode) + hdr.ErrorCode = ErrorCode; + rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset); + + end_offset = hdr.HighwaterMarkOffset; + + hdr.LastStableOffset = RD_KAFKA_OFFSET_INVALID; + hdr.LogStartOffset = RD_KAFKA_OFFSET_INVALID; + if (rd_kafka_buf_ApiVersion(request) >= 4) { + int32_t AbortedTxnCnt; + rd_kafka_buf_read_i64(rkbuf, &hdr.LastStableOffset); + if (rd_kafka_buf_ApiVersion(request) >= 5) + rd_kafka_buf_read_i64(rkbuf, &hdr.LogStartOffset); + + rd_kafka_buf_read_i32(rkbuf, &AbortedTxnCnt); + + if (rkb->rkb_rk->rk_conf.isolation_level == + RD_KAFKA_READ_UNCOMMITTED) { + + if (unlikely(AbortedTxnCnt > 0)) { + rd_rkb_log(rkb, LOG_ERR, "FETCH", + "%.*s [%" PRId32 + "]: " + "%" PRId32 + " aborted transaction(s) " + "encountered in READ_UNCOMMITTED " + "fetch response: ignoring.", + RD_KAFKAP_STR_PR(topic), + hdr.Partition, AbortedTxnCnt); + + rd_kafka_buf_skip(rkbuf, + AbortedTxnCnt * (8 + 8)); + } + } else { + /* Older brokers may return LSO -1, + * in which case we use the HWM. 
*/ + if (hdr.LastStableOffset >= 0) + end_offset = hdr.LastStableOffset; + + if (AbortedTxnCnt > 0) { + int k; + + if (unlikely(AbortedTxnCnt > 1000000)) + rd_kafka_buf_parse_fail( + rkbuf, + "%.*s [%" PRId32 + "]: " + "invalid AbortedTxnCnt %" PRId32, + RD_KAFKAP_STR_PR(topic), + hdr.Partition, AbortedTxnCnt); + + aborted_txns = + rd_kafka_aborted_txns_new(AbortedTxnCnt); + for (k = 0; k < AbortedTxnCnt; k++) { + int64_t PID; + int64_t FirstOffset; + rd_kafka_buf_read_i64(rkbuf, &PID); + rd_kafka_buf_read_i64(rkbuf, + &FirstOffset); + rd_kafka_aborted_txns_add( + aborted_txns, PID, FirstOffset); + } + rd_kafka_aborted_txns_sort(aborted_txns); + } + } + } + + if (rd_kafka_buf_ApiVersion(request) >= 11) + rd_kafka_buf_read_i32(rkbuf, &hdr.PreferredReadReplica); + else + hdr.PreferredReadReplica = -1; + + rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSetSize); + + if (unlikely(hdr.MessageSetSize < 0)) + rd_kafka_buf_parse_fail( + rkbuf, + "%.*s [%" PRId32 "]: invalid MessageSetSize %" PRId32, + RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize); + + /* Look up topic+partition */ + if (likely(rkt != NULL)) { + rd_kafka_topic_rdlock(rkt); + rktp = rd_kafka_toppar_get(rkt, hdr.Partition, + 0 /*no ua-on-miss*/); + rd_kafka_topic_rdunlock(rkt); + } + + if (unlikely(!rkt || !rktp)) { + rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC", + "Received Fetch response (error %hu) for unknown " + "topic %.*s [%" PRId32 "]: ignoring", + hdr.ErrorCode, RD_KAFKAP_STR_PR(topic), + hdr.Partition); + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + rd_kafka_toppar_lock(rktp); + rktp->rktp_lo_offset = hdr.LogStartOffset; + rktp->rktp_hi_offset = hdr.HighwaterMarkOffset; + /* Let the LastStable offset be the effective + * end_offset based on protocol version, that is: + * if connected to a broker that does not support + * LastStableOffset we use the HighwaterMarkOffset. */ + rktp->rktp_ls_offset = end_offset; + rd_kafka_toppar_unlock(rktp); + + if (hdr.PreferredReadReplica != -1) { + + rd_kafka_fetch_preferred_replica_handle( + rktp, rkbuf, rkb, hdr.PreferredReadReplica); + + if (unlikely(hdr.MessageSetSize != 0)) { + rd_rkb_log(rkb, LOG_WARNING, "FETCH", + "%.*s [%" PRId32 + "]: Fetch response has both preferred read " + "replica and non-zero message set size: " + "%" PRId32 ": skipping messages", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, hdr.MessageSetSize); + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + } + + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + rd_kafka_toppar_destroy(rktp); /* from get */ + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + rd_kafka_toppar_lock(rktp); + + /* Make sure toppar hasn't moved to another broker + * during the lifetime of the request. 
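The branch above only collects aborted-transaction metadata, and caps the effective end offset at the LastStableOffset, when the consumer is not in READ_UNCOMMITTED mode; the message-set parser later uses that list to hide aborted records. On the application side this behaviour is selected with the isolation.level property (sketch only, conf and errstr assumed from ordinary client setup):

/* Deliver only committed transactional records to the application. */
rd_kafka_conf_set(conf, "isolation.level", "read_committed",
                  errstr, sizeof(errstr));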
*/ + if (unlikely(rktp->rktp_broker != rkb)) { + rd_kafka_toppar_unlock(rktp); + rd_rkb_dbg(rkb, MSG, "FETCH", + "%.*s [%" PRId32 + "]: partition broker has changed: " + "discarding fetch response", + RD_KAFKAP_STR_PR(topic), hdr.Partition); + rd_kafka_toppar_destroy(rktp); /* from get */ + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + fetch_version = rktp->rktp_fetch_version; + rd_kafka_toppar_unlock(rktp); + + /* Check if this Fetch is for an outdated fetch version, + * or the original rktp was removed and a new one + * created (due to partition count decreasing and + * then increasing again, which can happen in + * desynchronized clusters): if so ignore it. */ + tver_skel.rktp = rktp; + tver = rd_list_find(request->rkbuf_rktp_vers, &tver_skel, + rd_kafka_toppar_ver_cmp); + rd_kafka_assert(NULL, tver); + if (tver->rktp != rktp || tver->version < fetch_version) { + rd_rkb_dbg(rkb, MSG, "DROP", + "%s [%" PRId32 + "]: dropping outdated fetch response " + "(v%d < %d or old rktp)", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + tver->version, fetch_version); + rd_atomic64_add(&rktp->rktp_c.rx_ver_drops, 1); + rd_kafka_toppar_destroy(rktp); /* from get */ + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + rd_rkb_dbg(rkb, MSG, "FETCH", + "Topic %.*s [%" PRId32 "] MessageSet size %" PRId32 + ", error \"%s\", MaxOffset %" PRId64 ", LSO %" PRId64 + ", Ver %" PRId32 "/%" PRId32, + RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize, + rd_kafka_err2str(hdr.ErrorCode), hdr.HighwaterMarkOffset, + hdr.LastStableOffset, tver->version, fetch_version); + + /* If this is the last message of the queue, + * signal EOF back to the application. */ + if (end_offset == rktp->rktp_offsets.fetch_offset && + rktp->rktp_offsets.eof_offset != rktp->rktp_offsets.fetch_offset) { + hdr.ErrorCode = RD_KAFKA_RESP_ERR__PARTITION_EOF; + rktp->rktp_offsets.eof_offset = rktp->rktp_offsets.fetch_offset; + } + + if (unlikely(hdr.ErrorCode != RD_KAFKA_RESP_ERR_NO_ERROR)) { + /* Handle partition-level errors. */ + rd_kafka_fetch_reply_handle_partition_error( + rkb, rktp, tver, hdr.ErrorCode, hdr.HighwaterMarkOffset); + + rd_kafka_toppar_destroy(rktp); /* from get()*/ + + rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize); + + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + /* No error, clear any previous fetch error. 
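The fetch-version comparison above is what makes control-plane operations such as seek safe against responses already in flight: the seek raises the partition's op version and stale Fetch responses are dropped here. From the application's point of view that is just an ordinary seek call, sketched below with an assumed topic handle and partition:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Rewind one partition to its beginning; any in-flight Fetch response
 * for the old position is discarded by the version check above. */
static void rewind_partition(rd_kafka_topic_t *rkt, int32_t partition) {
        rd_kafka_resp_err_t err = rd_kafka_seek(
            rkt, partition, RD_KAFKA_OFFSET_BEGINNING, 5000 /* timeout ms */);
        if (err)
                fprintf(stderr, "seek failed: %s\n", rd_kafka_err2str(err));
}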
*/ + rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + if (unlikely(hdr.MessageSetSize <= 0)) { + rd_kafka_toppar_destroy(rktp); /*from get()*/ + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + return RD_KAFKA_RESP_ERR_NO_ERROR; + } + + /** + * Parse MessageSet + */ + if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice, + (size_t)hdr.MessageSetSize)) + rd_kafka_buf_check_len(rkbuf, hdr.MessageSetSize); + + /* Parse messages */ + err = rd_kafka_msgset_parse(rkbuf, request, rktp, aborted_txns, tver); + + if (aborted_txns) + rd_kafka_aborted_txns_destroy(aborted_txns); + + rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice); + /* Continue with next partition regardless of + * parse errors (which are partition-specific) */ + + /* On error: back off the fetcher for this partition */ + if (unlikely(err)) + rd_kafka_toppar_fetch_backoff(rkb, rktp, err); + + rd_kafka_toppar_destroy(rktp); /*from get()*/ + + return RD_KAFKA_RESP_ERR_NO_ERROR; + +err_parse: + if (rktp) + rd_kafka_toppar_destroy(rktp); /*from get()*/ + + return rkbuf->rkbuf_err; +} + +/** + * Parses and handles a Fetch reply. + * Returns 0 on success or an error code on failure. + */ +static rd_kafka_resp_err_t +rd_kafka_fetch_reply_handle(rd_kafka_broker_t *rkb, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request) { + int32_t TopicArrayCnt; + int i; + const int log_decode_errors = LOG_ERR; + rd_kafka_topic_t *rkt = NULL; + int16_t ErrorCode = RD_KAFKA_RESP_ERR_NO_ERROR; + + if (rd_kafka_buf_ApiVersion(request) >= 1) { + int32_t Throttle_Time; + rd_kafka_buf_read_i32(rkbuf, &Throttle_Time); + + rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep, + Throttle_Time); + } + + if (rd_kafka_buf_ApiVersion(request) >= 7) { + int32_t SessionId; + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + rd_kafka_buf_read_i32(rkbuf, &SessionId); + } + + rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); + /* Verify that TopicArrayCnt seems to be in line with remaining size */ + rd_kafka_buf_check_len(rkbuf, + TopicArrayCnt * (3 /*topic min size*/ + + 4 /*PartitionArrayCnt*/ + 4 + + 2 + 8 + 4 /*inner header*/)); + + for (i = 0; i < TopicArrayCnt; i++) { + rd_kafkap_str_t topic; + int32_t PartitionArrayCnt; + int j; + + rd_kafka_buf_read_str(rkbuf, &topic); + rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt); + + rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic); + + for (j = 0; j < PartitionArrayCnt; j++) { + if (rd_kafka_fetch_reply_handle_partition( + rkb, &topic, rkt, rkbuf, request, ErrorCode)) + goto err_parse; + } + + if (rkt) { + rd_kafka_topic_destroy0(rkt); + rkt = NULL; + } + } + + if (rd_kafka_buf_read_remain(rkbuf) != 0) { + rd_kafka_buf_parse_fail(rkbuf, + "Remaining data after message set " + "parse: %" PRIusz " bytes", + rd_kafka_buf_read_remain(rkbuf)); + RD_NOTREACHED(); + } + + return 0; + +err_parse: + if (rkt) + rd_kafka_topic_destroy0(rkt); + rd_rkb_dbg(rkb, MSG, "BADMSG", + "Bad message (Fetch v%d): " + "is broker.version.fallback incorrectly set?", + (int)request->rkbuf_reqhdr.ApiVersion); + return rkbuf->rkbuf_err; +} + + + +static void rd_kafka_broker_fetch_reply(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *reply, + rd_kafka_buf_t *request, + void *opaque) { + + if (err == RD_KAFKA_RESP_ERR__DESTROY) + return; /* Terminating */ + + rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0); + rkb->rkb_fetching = 0; + + /* Parse and handle the messages (unless the request errored) */ + if (!err && reply) + err = rd_kafka_fetch_reply_handle(rkb, reply, request); + + if 
(unlikely(err)) { + char tmp[128]; + + rd_rkb_dbg(rkb, MSG, "FETCH", "Fetch reply: %s", + rd_kafka_err2str(err)); + switch (err) { + case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART: + case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION: + case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE: + case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE: + /* Request metadata information update */ + rd_snprintf(tmp, sizeof(tmp), "FetchRequest failed: %s", + rd_kafka_err2str(err)); + rd_kafka_metadata_refresh_known_topics( + rkb->rkb_rk, NULL, rd_true /*force*/, tmp); + /* FALLTHRU */ + + case RD_KAFKA_RESP_ERR__TRANSPORT: + case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT: + case RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: + /* The fetch is already intervalled from + * consumer_serve() so dont retry. */ + break; + + default: + break; + } + + rd_kafka_broker_fetch_backoff(rkb, err); + /* FALLTHRU */ + } +} + + + +/** + * @brief Build and send a Fetch request message for all underflowed toppars + * for a specific broker. + * + * @returns the number of partitions included in the FetchRequest, if any. + * + * @locality broker thread + */ +int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now) { + rd_kafka_toppar_t *rktp; + rd_kafka_buf_t *rkbuf; + int cnt = 0; + size_t of_TopicArrayCnt = 0; + int TopicArrayCnt = 0; + size_t of_PartitionArrayCnt = 0; + int PartitionArrayCnt = 0; + rd_kafka_topic_t *rkt_last = NULL; + int16_t ApiVersion = 0; + + /* Create buffer and segments: + * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt + * N x topic name + * N x PartitionArrayCnt Partition FetchOffset MaxBytes + * where N = number of toppars. + * Since we dont keep track of the number of topics served by + * this broker, only the partition count, we do a worst-case calc + * when allocating and assume each partition is on its own topic + */ + + if (unlikely(rkb->rkb_active_toppar_cnt == 0)) + return 0; + + rkbuf = rd_kafka_buf_new_request( + rkb, RD_KAFKAP_Fetch, 1, + /* ReplicaId+MaxWaitTime+MinBytes+MaxBytes+IsolationLevel+ + * SessionId+Epoch+TopicCnt */ + 4 + 4 + 4 + 4 + 1 + 4 + 4 + 4 + + /* N x PartCnt+Partition+CurrentLeaderEpoch+FetchOffset+ + * LogStartOffset+MaxBytes+?TopicNameLen?*/ + (rkb->rkb_active_toppar_cnt * (4 + 4 + 4 + 8 + 8 + 4 + 40)) + + /* ForgottenTopicsCnt */ + 4 + + /* N x ForgottenTopicsData */ + 0); + + ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_Fetch, + 0, 11, NULL); + + if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2) + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, + RD_KAFKA_FEATURE_MSGVER2); + else if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1) + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, + RD_KAFKA_FEATURE_MSGVER1); + else if (rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME) + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, + RD_KAFKA_FEATURE_THROTTLETIME); + + + /* FetchRequest header */ + /* ReplicaId */ + rd_kafka_buf_write_i32(rkbuf, -1); + /* MaxWaitTime */ + rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms); + /* MinBytes */ + rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 3) + /* MaxBytes */ + rd_kafka_buf_write_i32(rkbuf, + rkb->rkb_rk->rk_conf.fetch_max_bytes); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 4) + /* IsolationLevel */ + rd_kafka_buf_write_i8(rkbuf, + rkb->rkb_rk->rk_conf.isolation_level); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) { + /* SessionId */ + rd_kafka_buf_write_i32(rkbuf, 0); + /* Epoch */ + 
rd_kafka_buf_write_i32(rkbuf, -1); + } + + /* Write zero TopicArrayCnt but store pointer for later update */ + of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); + + /* Prepare map for storing the fetch version for each partition, + * this will later be checked in Fetch response to purge outdated + * responses (e.g., after a seek). */ + rkbuf->rkbuf_rktp_vers = + rd_list_new(0, (void *)rd_kafka_toppar_ver_destroy); + rd_list_prealloc_elems(rkbuf->rkbuf_rktp_vers, + sizeof(struct rd_kafka_toppar_ver), + rkb->rkb_active_toppar_cnt, 0); + + /* Round-robin start of the list. */ + rktp = rkb->rkb_active_toppar_next; + do { + struct rd_kafka_toppar_ver *tver; + + if (rkt_last != rktp->rktp_rkt) { + if (rkt_last != NULL) { + /* Update PartitionArrayCnt */ + rd_kafka_buf_update_i32(rkbuf, + of_PartitionArrayCnt, + PartitionArrayCnt); + } + + /* Topic name */ + rd_kafka_buf_write_kstr(rkbuf, + rktp->rktp_rkt->rkt_topic); + TopicArrayCnt++; + rkt_last = rktp->rktp_rkt; + /* Partition count */ + of_PartitionArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); + PartitionArrayCnt = 0; + } + + PartitionArrayCnt++; + + /* Partition */ + rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 9) + /* CurrentLeaderEpoch */ + rd_kafka_buf_write_i32(rkbuf, -1); + + /* FetchOffset */ + rd_kafka_buf_write_i64(rkbuf, rktp->rktp_offsets.fetch_offset); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 5) + /* LogStartOffset - only used by follower replica */ + rd_kafka_buf_write_i64(rkbuf, -1); + + /* MaxBytes */ + rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes); + + rd_rkb_dbg(rkb, FETCH, "FETCH", + "Fetch topic %.*s [%" PRId32 "] at offset %" PRId64 + " (v%d)", + RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), + rktp->rktp_partition, + rktp->rktp_offsets.fetch_offset, + rktp->rktp_fetch_version); + + /* We must have a valid fetch offset when we get here */ + rd_dassert(rktp->rktp_offsets.fetch_offset >= 0); + + /* Add toppar + op version mapping. */ + tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL); + tver->rktp = rd_kafka_toppar_keep(rktp); + tver->version = rktp->rktp_fetch_version; + + cnt++; + } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink)) != + rkb->rkb_active_toppar_next); + + /* Update next toppar to fetch in round-robin list. */ + rd_kafka_broker_active_toppar_next( + rkb, rktp ? CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, + rktp_activelink) + : NULL); + + rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", cnt, + rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt); + if (!cnt) { + rd_kafka_buf_destroy(rkbuf); + return cnt; + } + + if (rkt_last != NULL) { + /* Update last topic's PartitionArrayCnt */ + rd_kafka_buf_update_i32(rkbuf, of_PartitionArrayCnt, + PartitionArrayCnt); + } + + /* Update TopicArrayCnt */ + rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, TopicArrayCnt); + + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) + /* Length of the ForgottenTopics list (KIP-227). Broker + * use only - not used by the consumer. 
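MaxWaitTime, MinBytes and MaxBytes in the request header above are taken directly from configuration, and rd_kafka_toppar_fetch_decide() later in this file stops fetching a partition once its local queue exceeds queued.min.messages or queued.max.messages.kbytes. A tuning sketch with placeholder values, return codes omitted, conf/errstr assumed from ordinary client setup:

/* Long-poll each Fetch up to 500 ms or until 64 KiB are available. */
rd_kafka_conf_set(conf, "fetch.wait.max.ms", "500", errstr, sizeof(errstr));
rd_kafka_conf_set(conf, "fetch.min.bytes", "65536", errstr, sizeof(errstr));
/* Bound local prefetching per partition queue. */
rd_kafka_conf_set(conf, "queued.min.messages", "10000",
                  errstr, sizeof(errstr));
rd_kafka_conf_set(conf, "queued.max.messages.kbytes", "65536",
                  errstr, sizeof(errstr));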
*/ + rd_kafka_buf_write_i32(rkbuf, 0); + + if (rd_kafka_buf_ApiVersion(rkbuf) >= 11) + /* RackId */ + rd_kafka_buf_write_kstr(rkbuf, + rkb->rkb_rk->rk_conf.client_rack); + + /* Consider Fetch requests blocking if fetch.wait.max.ms >= 1s */ + if (rkb->rkb_rk->rk_conf.fetch_wait_max_ms >= 1000) + rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING; + + /* Use configured timeout */ + rd_kafka_buf_set_timeout(rkbuf, + rkb->rkb_rk->rk_conf.socket_timeout_ms + + rkb->rkb_rk->rk_conf.fetch_wait_max_ms, + now); + + /* Sort toppar versions for quicker lookups in Fetch response. */ + rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp); + + rkb->rkb_fetching = 1; + rd_kafka_broker_buf_enq1(rkb, rkbuf, rd_kafka_broker_fetch_reply, NULL); + + return cnt; +} + + + +/** + * @brief Decide whether this toppar should be on the fetch list or not. + * + * Also: + * - update toppar's op version (for broker thread's copy) + * - finalize statistics (move rktp_offsets to rktp_offsets_fin) + * + * @returns the partition's Fetch backoff timestamp, or 0 if no backoff. + * + * @locality broker thread + * @locks none + */ +rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int force_remove) { + int should_fetch = 1; + const char *reason = ""; + int32_t version; + rd_ts_t ts_backoff = 0; + rd_bool_t lease_expired = rd_false; + + rd_kafka_toppar_lock(rktp); + + /* Check for preferred replica lease expiry */ + lease_expired = rktp->rktp_leader_id != rktp->rktp_broker_id && + rd_interval(&rktp->rktp_lease_intvl, + 5 * 60 * 1000 * 1000 /*5 minutes*/, 0) > 0; + if (lease_expired) { + /* delete_to_leader() requires no locks to be held */ + rd_kafka_toppar_unlock(rktp); + rd_kafka_toppar_delegate_to_leader(rktp); + rd_kafka_toppar_lock(rktp); + + reason = "preferred replica lease expired"; + should_fetch = 0; + goto done; + } + + /* Forced removal from fetch list */ + if (unlikely(force_remove)) { + reason = "forced removal"; + should_fetch = 0; + goto done; + } + + if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) { + reason = "partition removed"; + should_fetch = 0; + goto done; + } + + /* Skip toppars not in active fetch state */ + if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) { + reason = "not in active fetch state"; + should_fetch = 0; + goto done; + } + + /* Update broker thread's fetch op version */ + version = rktp->rktp_op_version; + if (version > rktp->rktp_fetch_version || + rktp->rktp_next_offset != rktp->rktp_last_next_offset || + rktp->rktp_offsets.fetch_offset == RD_KAFKA_OFFSET_INVALID) { + /* New version barrier, something was modified from the + * control plane. Reset and start over. + * Alternatively only the next_offset changed but not the + * barrier, which is the case when automatically triggering + * offset.reset (such as on PARTITION_EOF or + * OFFSET_OUT_OF_RANGE). */ + + rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC", + "Topic %s [%" PRId32 + "]: fetch decide: " + "updating to version %d (was %d) at " + "offset %" PRId64 " (was %" PRId64 ")", + rktp->rktp_rkt->rkt_topic->str, + rktp->rktp_partition, version, + rktp->rktp_fetch_version, rktp->rktp_next_offset, + rktp->rktp_offsets.fetch_offset); + + rd_kafka_offset_stats_reset(&rktp->rktp_offsets); + + /* New start offset */ + rktp->rktp_offsets.fetch_offset = rktp->rktp_next_offset; + rktp->rktp_last_next_offset = rktp->rktp_next_offset; + + rktp->rktp_fetch_version = version; + + /* Clear last error to propagate new fetch + * errors if encountered. 
*/ + rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, + version); + } + + + if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) { + should_fetch = 0; + reason = "paused"; + + } else if (RD_KAFKA_OFFSET_IS_LOGICAL(rktp->rktp_next_offset)) { + should_fetch = 0; + reason = "no concrete offset"; + + } else if (rd_kafka_q_len(rktp->rktp_fetchq) >= + rkb->rkb_rk->rk_conf.queued_min_msgs) { + /* Skip toppars who's local message queue is already above + * the lower threshold. */ + reason = "queued.min.messages exceeded"; + should_fetch = 0; + + } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >= + rkb->rkb_rk->rk_conf.queued_max_msg_bytes) { + reason = "queued.max.messages.kbytes exceeded"; + should_fetch = 0; + + } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) { + reason = "fetch backed off"; + ts_backoff = rktp->rktp_ts_fetch_backoff; + should_fetch = 0; + } + +done: + /* Copy offset stats to finalized place holder. */ + rktp->rktp_offsets_fin = rktp->rktp_offsets; + + if (rktp->rktp_fetch != should_fetch) { + rd_rkb_dbg( + rkb, FETCH, "FETCH", + "Topic %s [%" PRId32 + "] in state %s at offset %s " + "(%d/%d msgs, %" PRId64 + "/%d kb queued, " + "opv %" PRId32 ") is %s%s", + rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, + rd_kafka_fetch_states[rktp->rktp_fetch_state], + rd_kafka_offset2str(rktp->rktp_next_offset), + rd_kafka_q_len(rktp->rktp_fetchq), + rkb->rkb_rk->rk_conf.queued_min_msgs, + rd_kafka_q_size(rktp->rktp_fetchq) / 1024, + rkb->rkb_rk->rk_conf.queued_max_msg_kbytes, + rktp->rktp_fetch_version, + should_fetch ? "fetchable" : "not fetchable: ", reason); + + if (should_fetch) { + rd_dassert(rktp->rktp_fetch_version > 0); + rd_kafka_broker_active_toppar_add( + rkb, rktp, *reason ? reason : "fetchable"); + } else { + rd_kafka_broker_active_toppar_del(rkb, rktp, reason); + } + } + + rd_kafka_toppar_unlock(rktp); + + /* Non-fetching partitions will have an + * indefinate backoff, unless explicitly specified. */ + if (!should_fetch && !ts_backoff) + ts_backoff = RD_TS_MAX; + + return ts_backoff; +} diff -Nru librdkafka-1.9.2/src/rdkafka_fetcher.h librdkafka-2.0.2/src/rdkafka_fetcher.h --- librdkafka-1.9.2/src/rdkafka_fetcher.h 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_fetcher.h 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,41 @@ +/* + * librdkafka - The Apache Kafka C/C++ library + * + * Copyright (c) 2022 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RDKAFKA_FETCHER_H_ +#define _RDKAFKA_FETCHER_H_ + + +int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now); + +rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, + rd_kafka_broker_t *rkb, + int force_remove); + + +#endif /* _RDKAFKA_FETCHER_H_ */ diff -Nru librdkafka-1.9.2/src/rdkafka.h librdkafka-2.0.2/src/rdkafka.h --- librdkafka-1.9.2/src/rdkafka.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka.h 2023-01-20 09:14:36.000000000 +0000 @@ -165,7 +165,7 @@ * @remark This value should only be used during compile time, * for runtime checks of version use rd_kafka_version() */ -#define RD_KAFKA_VERSION 0x010902ff +#define RD_KAFKA_VERSION 0x020002ff /** * @brief Returns the librdkafka version as integer. @@ -1695,7 +1695,7 @@ * Topic-level configuration properties may be set using this interface * in which case they are applied on the \c default_topic_conf. * If no \c default_topic_conf has been set one will be created. - * Any sub-sequent rd_kafka_conf_set_default_topic_conf() calls will + * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will * replace the current default topic configuration. * * @returns \c rd_kafka_conf_res_t to indicate success or failure. @@ -2244,6 +2244,35 @@ int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque)); #endif +/** Forward declaration to avoid netdb.h or winsock includes */ +struct addrinfo; + +/** + * @brief Set address resolution callback. + * + * The callback is responsible for resolving the hostname \p node and the + * service \p service into a list of socket addresses as \c getaddrinfo(3) + * would. The \p hints and \p res parameters function as they do for + * \c getaddrinfo(3). The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * If the callback is invoked with a NULL \p node, \p service, and \p hints, the + * callback should instead free the addrinfo struct specified in \p res. In this + * case the callback must succeed; the return value will not be checked by the + * caller. + * + * The callback's return value is interpreted as the return value of \p + * \c getaddrinfo(3). + * + * @remark The callback will be called from an internal librdkafka thread. + */ +RD_EXPORT void +rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque)); /** * @brief Sets the verification callback of the broker certificate @@ -2363,6 +2392,14 @@ * * @remark CA certificate in PEM format may also be set with the * `ssl.ca.pem` configuration property. + * + * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is + * encoded using an obsolete cipher, it might be necessary to set up + * an OpenSSL configuration file to load the "legacy" provider and + * set the OPENSSL_CONF environment variable. 
+ * See + * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more + * information. */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, @@ -2527,9 +2564,8 @@ /** * @name Topic configuration - * @{ - * * @brief Topic configuration property interface + * @{ * */ @@ -2845,7 +2881,7 @@ * \p conf is an optional struct created with `rd_kafka_conf_new()` that will * be used instead of the default configuration. * The \p conf object is freed by this function on success and must not be used - * or destroyed by the application sub-sequently. + * or destroyed by the application subsequently. * See `rd_kafka_conf_set()` et.al for more information. * * \p errstr must be a pointer to memory of at least size \p errstr_size where @@ -2991,7 +3027,7 @@ * `rd_kafka_topic_conf_new()` that will be used instead of the default * topic configuration. * The \p conf object is freed by this function and must not be used or - * destroyed by the application sub-sequently. + * destroyed by the application subsequently. * See `rd_kafka_topic_conf_set()` et.al for more information. * * Topic handles are refcounted internally and calling rd_kafka_topic_new() @@ -3051,22 +3087,22 @@ /** * @brief Polls the provided kafka handle for events. * - * Events will cause application provided callbacks to be called. + * Events will cause application-provided callbacks to be called. * * The \p timeout_ms argument specifies the maximum amount of time * (in milliseconds) that the call will block waiting for events. * For non-blocking calls, provide 0 as \p timeout_ms. - * To wait indefinately for an event, provide -1. + * To wait indefinitely for an event, provide -1. * * @remark An application should make sure to call poll() at regular * intervals to serve any queued callbacks waiting to be called. * @remark If your producer doesn't have any callback set (in particular * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb) - * you might chose not to call poll(), though this is not + * you might choose not to call poll(), though this is not * recommended. * * Events: - * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] + * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] * - error callbacks (rd_kafka_conf_set_error_cb()) [all] * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] @@ -3324,6 +3360,25 @@ /** + * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by + * this Kafka client. + * + * This function sets or resets the SASL username and password credentials + * used by this Kafka client. The new credentials will be used the next time + * this client needs to authenticate to a broker. This function + * will not disconnect existing connections that might have been made using + * the old credentials. + * + * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms. + * + * @returns NULL on success or an error object on error. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, + const char *username, + const char *password); + +/** * @returns a reference to the librdkafka consumer queue. * This is the queue served by rd_kafka_consumer_poll(). 
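rd_kafka_sasl_set_credentials() above takes effect on the next authentication attempt and leaves existing connections untouched, which makes it suitable for rotating PLAIN or SCRAM secrets on a live client. A short sketch; the wrapper name and credential values are placeholders:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void rotate_sasl_secret(rd_kafka_t *rk, const char *user,
                               const char *pass) {
        rd_kafka_error_t *error =
            rd_kafka_sasl_set_credentials(rk, user, pass);
        if (error) {
                fprintf(stderr, "SASL credential update failed: %s\n",
                        rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);
        }
}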
* @@ -3764,6 +3819,8 @@ void *commit_opaque); +/**@}*/ + /** * @name Simple Consumer API (legacy): Queue consumers * @{ @@ -3889,8 +3946,8 @@ /** * @name KafkaConsumer (C) - * @{ * @brief High-level KafkaConsumer C API + * @{ * * * @@ -4419,13 +4476,13 @@ #define RD_KAFKA_MSG_F_BLOCK \ 0x4 /**< Block produce*() on message queue full. \ * WARNING: If a delivery report callback \ - * is used the application MUST \ + * is used, the application MUST \ * call rd_kafka_poll() (or equiv.) \ * to make sure delivered messages \ * are drained from the internal \ * delivery report queue. \ * Failure to do so will result \ - * in indefinately blocking on \ + * in indefinitely blocking on \ * the produce() call when the \ * message queue is full. */ #define RD_KAFKA_MSG_F_PARTITION \ @@ -4440,10 +4497,10 @@ * \p rkt is the target topic which must have been previously created with * `rd_kafka_topic_new()`. * - * `rd_kafka_produce()` is an asynch non-blocking API. + * `rd_kafka_produce()` is an asynchronous non-blocking API. * See `rd_kafka_conf_set_dr_msg_cb` on how to setup a callback to be called * once the delivery status (success or failure) is known. The delivery report - * is trigged by the application calling `rd_kafka_poll()` (at regular + * is triggered by the application calling `rd_kafka_poll()` (at regular * intervals) or `rd_kafka_flush()` (at termination). * * Since producing is asynchronous, you should call `rd_kafka_flush()` before @@ -4660,7 +4717,7 @@ * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT. * * @warning Purging messages that are in-flight to or from the broker - * will ignore any sub-sequent acknowledgement for these messages + * will ignore any subsequent acknowledgement for these messages * received from the broker, effectively making it impossible * for the application to know if the messages were successfully * produced or not. This may result in duplicate messages if the @@ -4762,7 +4819,6 @@ char *orig_broker_name; /**< Name of originating broker */ } rd_kafka_metadata_t; - /** * @brief Request Metadata from broker. * @@ -4797,6 +4853,43 @@ RD_EXPORT void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); +/** + * @brief Node (broker) information. + */ +typedef struct rd_kafka_Node_s rd_kafka_Node_t; + +/** + * @brief Get the id of \p node. + * + * @param node The Node instance. + * + * @return The node id. + */ +RD_EXPORT +int rd_kafka_Node_id(const rd_kafka_Node_t *node); + +/** + * @brief Get the host of \p node. + * + * @param node The Node instance. + * + * @return The node host. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p node object. + */ +RD_EXPORT +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node); + +/** + * @brief Get the port of \p node. + * + * @param node The Node instance. + * + * @return The node port. + */ +RD_EXPORT +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node); /**@}*/ @@ -4830,6 +4923,21 @@ }; /** + * @enum rd_kafka_consumer_group_state_t + * + * @brief Consumer group state. 
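/* Illustrative sketch (not librdkafka source): printing a rd_kafka_Node_t
 * obtained from one of the admin results below; how the node pointer is
 * acquired is assumed here. */
static void print_node(const rd_kafka_Node_t *node) {
        printf("broker %d at %s:%u\n",
               rd_kafka_Node_id(node),
               rd_kafka_Node_host(node),
               (unsigned)rd_kafka_Node_port(node));
}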
+ */ +typedef enum { + RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0, + RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1, + RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2, + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3, + RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4, + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5, + RD_KAFKA_CONSUMER_GROUP_STATE__CNT +} rd_kafka_consumer_group_state_t; + +/** * @brief Group information */ struct rd_kafka_group_info { @@ -4857,7 +4965,7 @@ /** * @brief List and describe client groups in cluster. * - * \p group is an optional group name to describe, otherwise (\p NULL) all + * \p group is an optional group name to describe, otherwise (\c NULL) all * groups are returned. * * \p timeout_ms is the (approximate) maximum time to wait for response @@ -4880,6 +4988,9 @@ * group list. * * @sa Use rd_kafka_group_list_destroy() to release list memory. + * + * @deprecated Use rd_kafka_ListConsumerGroups() and + * rd_kafka_DescribeConsumerGroups() instead. */ RD_EXPORT rd_kafka_resp_err_t @@ -4889,6 +5000,28 @@ int timeout_ms); /** + * @brief Returns a name for a state code. + * + * @param state The state value. + * + * @return The group state name corresponding to the provided group state value. + */ +RD_EXPORT +const char * +rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state); + +/** + * @brief Returns a code for a state name. + * + * @param name The state name. + * + * @return The group state value corresponding to the provided group state name. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t +rd_kafka_consumer_group_state_code(const char *name); + +/** * @brief Release list memory */ RD_EXPORT @@ -5141,6 +5274,15 @@ #define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */ #define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */ #define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */ +/** ListConsumerGroupsResult_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT 0x2000 +/** DescribeConsumerGroups_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT 0x4000 +/** ListConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT 0x8000 +/** AlterConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT 0x10000 + /** * @returns the event type for the given event. @@ -5291,6 +5433,10 @@ * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT * - RD_KAFKA_EVENT_DELETERECORDS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT */ RD_EXPORT void *rd_kafka_event_opaque(rd_kafka_event_t *rkev); @@ -5390,10 +5536,18 @@ typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t; /*! DeleteRecords result type */ typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t; +/*! ListConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t; +/*! DescribeConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t; /*! DeleteGroups result type */ typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t; /*! DeleteConsumerGroupOffsets result type */ typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t; +/*! AlterConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t; +/*! 
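/* Illustrative sketch (not librdkafka source): round-tripping a group state
 * through the name/code helpers declared nearby; no assumption is made about
 * the exact name strings returned. */
rd_kafka_consumer_group_state_t state = RD_KAFKA_CONSUMER_GROUP_STATE_STABLE;
const char *name = rd_kafka_consumer_group_state_name(state);
if (rd_kafka_consumer_group_state_code(name) != state)
        fprintf(stderr, "unexpected mapping for state %s\n", name);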
ListConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t; /** * @brief Get CreateTopics result. @@ -5466,6 +5620,36 @@ rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev); /** + * @brief Get ListConsumerGroups result. + * + * @returns the result of a ListConsumerGroups request, or NULL if event is of + * different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_ListConsumerGroups_result_t * +rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeConsumerGroups result. + * + * @returns the result of a DescribeConsumerGroups request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeConsumerGroups_result_t * +rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev); + +/** * @brief Get DeleteGroups result. * * @returns the result of a DeleteGroups request, or NULL if event is of @@ -5520,6 +5704,36 @@ rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev); /** + * @brief Get AlterConsumerGroupOffsets result. + * + * @returns the result of a AlterConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_AlterConsumerGroupOffsets_result_t * +rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ListConsumerGroupOffsets result. + * + * @returns the result of a ListConsumerGroupOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_ListConsumerGroupOffsets_result_t * +rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** * @brief Poll a queue for an event for max \p timeout_ms. * * @returns an event, or NULL. @@ -5567,6 +5781,7 @@ * and not statically. Failure to do so will lead to missing symbols * or finding symbols in another librdkafka library than the * application was linked with. + * @{ */ @@ -5985,6 +6200,28 @@ void *ic_opaque); +/** + * @brief on_broker_state_change() is called just after a broker + * has been created or its state has been changed. + * + * @param rk The client instance. + * @param broker_id The broker id (-1 is used for bootstrap brokers). + * @param secproto The security protocol. + * @param name The original name of the broker. + * @param port The port of the broker. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_broker_state_change_t)( + rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, + int port, + const char *state, + void *ic_opaque); + /** * @brief Append an on_conf_set() interceptor. @@ -5995,7 +6232,7 @@ * @param ic_opaque Opaque value that will be passed to the function. 
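/* Illustrative sketch (not librdkafka source): dispatching the new admin
 * result events from a queue, using the pre-existing rd_kafka_queue_poll(),
 * rd_kafka_event_type() and rd_kafka_event_destroy() APIs. The queue rkqu is
 * assumed to be the one passed to the admin call. */
rd_kafka_event_t *ev = rd_kafka_queue_poll(rkqu, 10 * 1000 /* 10s */);
if (ev) {
        switch (rd_kafka_event_type(ev)) {
        case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT: {
                const rd_kafka_ListConsumerGroups_result_t *res =
                    rd_kafka_event_ListConsumerGroups_result(ev);
                /* ... walk res, see the sketches further below ... */
                (void)res;
                break;
        }
        case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT: {
                const rd_kafka_DescribeConsumerGroups_result_t *res =
                    rd_kafka_event_DescribeConsumerGroups_result(ev);
                (void)res;
                break;
        }
        default:
                break;
        }
        rd_kafka_event_destroy(ev);
}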
* * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( @@ -6014,7 +6251,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( @@ -6061,7 +6298,7 @@ * has not already been added. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t @@ -6081,7 +6318,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( @@ -6118,7 +6355,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( @@ -6137,7 +6374,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( @@ -6156,7 +6393,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( @@ -6175,7 +6412,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( @@ -6194,7 +6431,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. 
*/ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( @@ -6213,7 +6450,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( @@ -6232,7 +6469,7 @@ * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( @@ -6242,6 +6479,26 @@ void *ic_opaque); +/** + * @brief Append an on_broker_state_change() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_broker_state_change() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, + void *ic_opaque); + + /**@}*/ @@ -6369,10 +6626,16 @@ RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */ /** DeleteConsumerGroupOffsets */ RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, - RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */ - RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */ - RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */ - RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ + RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */ + RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */ + RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, /**< ListConsumerGroups */ + RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, /**< DescribeConsumerGroups */ + /** ListConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, + /** AlterConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, + RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ } rd_kafka_admin_op_t; /** @@ -6533,6 +6796,40 @@ size_t errstr_size); +/** + * @brief Whether broker should return stable offsets + * (transaction-committed). + * + * @param options Admin options. + * @param true_or_false Defaults to false. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. + * + * @remark This option is valid for ListConsumerGroupOffsets. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets( + rd_kafka_AdminOptions_t *options, + int true_or_false); + +/** + * @brief Set consumer groups states to query for. + * + * @param options Admin options. + * @param consumer_group_states Array of consumer group states. + * @param consumer_group_states_cnt Size of the \p consumer_group_states array. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. 
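/* Illustrative sketch (not librdkafka source): an on_broker_state_change()
 * interceptor that only logs transitions, and its registration. Instance
 * interceptors are assumed to be added before the client is fully
 * initialized, typically from a plugin's on_new() interceptor. */
static rd_kafka_resp_err_t
my_on_broker_state_change(rd_kafka_t *rk, int32_t broker_id,
                          const char *secproto, const char *name,
                          int port, const char *state, void *ic_opaque) {
        fprintf(stderr, "broker %d (%s://%s:%d) -> %s\n",
                (int)broker_id, secproto, name, port, state);
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}

rd_kafka_interceptor_add_on_broker_state_change(
    rk, "state_logger", my_on_broker_state_change, NULL);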
+ * + * @remark This option is valid for ListConsumerGroups. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states( + rd_kafka_AdminOptions_t *options, + const rd_kafka_consumer_group_state_t *consumer_group_states, + size_t consumer_group_states_cnt); /** * @brief Set application opaque value that can be extracted from the @@ -6542,10 +6839,12 @@ rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque); +/**@}*/ - -/* - * CreateTopics - create topics in cluster. +/** + * @name Admin API - Topics + * @brief Topic related operations. + * @{ * */ @@ -6759,9 +7058,12 @@ size_t *cntp); +/**@}*/ -/* - * CreatePartitions - add partitions to topic. +/** + * @name Admin API - Partitions + * @brief Partition related operations. + * @{ * */ @@ -6880,10 +7182,12 @@ const rd_kafka_CreatePartitions_result_t *result, size_t *cntp); +/**@}*/ - -/* - * Cluster, broker, topic configuration entries, sources, etc. +/** + * @name Admin API - Configuration + * @brief Cluster, broker, topic configuration entries, sources, etc. + * @{ * */ @@ -7248,9 +7552,12 @@ size_t *cntp); -/* - * DeleteRecords - delete records (messages) from partitions - * +/**@}*/ + +/** + * @name Admin API - DeleteRecords + * @brief delete records (messages) from partitions. + * @{ * */ @@ -7337,97 +7644,660 @@ rd_kafka_DeleteRecords_result_offsets( const rd_kafka_DeleteRecords_result_t *result); -/* - * DeleteGroups - delete groups from cluster - * - * - */ - -/*! Represents a group to be deleted. */ -typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t; +/**@}*/ /** - * @brief Create a new DeleteGroup object. This object is later passed to - * rd_kafka_DeleteGroups(). - * - * @param group Name of group to delete. - * - * @returns a new allocated DeleteGroup object. - * Use rd_kafka_DeleteGroup_destroy() to free object when done. + * @name Admin API - ListConsumerGroups + * @{ */ -RD_EXPORT rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group); -/** - * @brief Destroy and free a DeleteGroup object previously created with - * rd_kafka_DeleteGroup_new() - */ -RD_EXPORT void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group); /** - * @brief Helper function to destroy all DeleteGroup objects in - * the \p del_groups array (of \p del_group_cnt elements). - * The array itself is not freed. + * @brief ListConsumerGroups result for a single group */ -RD_EXPORT void -rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt); + +/**! ListConsumerGroups result for a single group */ +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t; + +/**! ListConsumerGroups results and errors */ +typedef struct rd_kafka_ListConsumerGroupsResult_s + rd_kafka_ListConsumerGroupsResult_t; /** - * @brief Delete groups from cluster as specified by the \p del_groups - * array of size \p del_group_cnt elements. + * @brief List the consumer groups available in the cluster. * * @param rk Client instance. - * @param del_groups Array of groups to delete. - * @param del_group_cnt Number of elements in \p del_groups array. * @param options Optional admin options, or NULL for defaults. * @param rkqu Queue to emit result on. 
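/* Illustrative sketch (not librdkafka source): combining the options above
 * with the ListConsumerGroups call documented here to list only Stable and
 * Empty groups. rk is an existing client instance; error handling of the
 * option setter is kept minimal. */
rd_kafka_AdminOptions_t *options =
    rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
rd_kafka_consumer_group_state_t states[] = {
    RD_KAFKA_CONSUMER_GROUP_STATE_STABLE,
    RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY};
rd_kafka_error_t *error =
    rd_kafka_AdminOptions_set_match_consumer_group_states(options, states, 2);
if (error)
        rd_kafka_error_destroy(error);

rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
rd_kafka_ListConsumerGroups(rk, options, rkqu);
rd_kafka_AdminOptions_destroy(options);
/* ... poll rkqu for RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT ... */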
* * @remark The result event type emitted on the supplied queue is of type - * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT + * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT */ RD_EXPORT -void rd_kafka_DeleteGroups(rd_kafka_t *rk, - rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); +/** + * @brief Gets the group id for the \p grplist group. + * + * @param grplist The group listing. + * + * @return The group id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grplist object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupListing_group_id( + const rd_kafka_ConsumerGroupListing_t *grplist); +/** + * @brief Is the \p grplist group a simple consumer group. + * + * @param grplist The group listing. + * + * @return 1 if the group is a simple consumer group, + * else 0. + */ +RD_EXPORT +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + const rd_kafka_ConsumerGroupListing_t *grplist); -/* - * DeleteGroups result type and methods +/** + * @brief Gets state for the \p grplist group. + * + * @param grplist The group listing. + * + * @return A group state. */ +RD_EXPORT +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state( + const rd_kafka_ConsumerGroupListing_t *grplist); /** - * @brief Get an array of group results from a DeleteGroups result. + * @brief Get an array of valid list groups from a ListConsumerGroups result. * * The returned groups life-time is the same as the \p result object. * * @param result Result to get group results from. * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. */ -RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( - const rd_kafka_DeleteGroups_result_t *result, +RD_EXPORT +const rd_kafka_ConsumerGroupListing_t ** +rd_kafka_ListConsumerGroups_result_valid( + const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp); - -/* - * DeleteConsumerGroupOffsets - delete groups from cluster +/** + * @brief Get an array of errors from a ListConsumerGroups call result. + * + * The returned errors life-time is the same as the \p result object. + * + * @param result ListConsumerGroups result. + * @param cntp Is updated to the number of elements in the array. * + * @return Array of errors in \p result. * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. */ +RD_EXPORT +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp); -/*! Represents consumer group committed offsets to be deleted. */ -typedef struct rd_kafka_DeleteConsumerGroupOffsets_s - rd_kafka_DeleteConsumerGroupOffsets_t; +/**@}*/ /** - * @brief Create a new DeleteConsumerGroupOffsets object. - * This object is later passed to rd_kafka_DeleteConsumerGroupOffsets(). - * - * @param group Consumer group id. - * @param partitions Partitions to delete committed offsets for. - * Only the topic and partition fields are used. + * @name Admin API - DescribeConsumerGroups + * @{ + */ + +/** + * @brief DescribeConsumerGroups result type. 
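/* Illustrative sketch (not librdkafka source): walking a ListConsumerGroups
 * result once the corresponding event has been received (ev as in the event
 * dispatch sketch above). All returned memory lives as long as the event. */
const rd_kafka_ListConsumerGroups_result_t *res =
    rd_kafka_event_ListConsumerGroups_result(ev);
size_t i, valid_cnt, error_cnt;
const rd_kafka_ConsumerGroupListing_t **groups =
    rd_kafka_ListConsumerGroups_result_valid(res, &valid_cnt);
const rd_kafka_error_t **errors =
    rd_kafka_ListConsumerGroups_result_errors(res, &error_cnt);

for (i = 0; i < valid_cnt; i++)
        printf("%s%s: %s\n",
               rd_kafka_ConsumerGroupListing_group_id(groups[i]),
               rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
                   groups[i]) ? " (simple)" : "",
               rd_kafka_consumer_group_state_name(
                   rd_kafka_ConsumerGroupListing_state(groups[i])));

for (i = 0; i < error_cnt; i++)
        fprintf(stderr, "ListConsumerGroups error: %s\n",
                rd_kafka_error_string(errors[i]));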
+ * + */ +typedef struct rd_kafka_ConsumerGroupDescription_s + rd_kafka_ConsumerGroupDescription_t; + +/** + * @brief Member description included in ConsumerGroupDescription. + * + */ +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t; + +/** + * @brief Member assignment included in MemberDescription. + * + */ +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t; + +/** + * @brief Describe groups from cluster as specified by the \p groups + * array of size \p groups_cnt elements. + * + * @param rk Client instance. + * @param groups Array of groups to describe. + * @param groups_cnt Number of elements in \p groups array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, + const char **groups, + size_t groups_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Get an array of group results from a DescribeConsumerGroups result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_ConsumerGroupDescription_t ** +rd_kafka_DescribeConsumerGroups_result_groups( + const rd_kafka_DescribeConsumerGroups_result_t *result, + size_t *cntp); + + +/** + * @brief Gets the group id for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupDescription_group_id( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the error for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group description error. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Is the \p grpdesc group a simple consumer group. + * + * @param grpdesc The group description. + * @return 1 if the group is a simple consumer group, + * else 0. + */ +RD_EXPORT +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + + +/** + * @brief Gets the partition assignor for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The partition assignor. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupDescription_partition_assignor( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + + +/** + * @brief Gets state for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return A group state. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the coordinator for the \p grpdesc group. 
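/* Illustrative sketch (not librdkafka source): describing a group and
 * printing the top-level fields of each description. The group name is
 * hypothetical and the event/queue handling follows the sketches above. */
const char *groups[] = {"example-group"};
rd_kafka_DescribeConsumerGroups(rk, groups, 1, NULL /* default options */,
                                rkqu);

/* ... once RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT is received: ... */
const rd_kafka_DescribeConsumerGroups_result_t *res =
    rd_kafka_event_DescribeConsumerGroups_result(ev);
size_t i, cnt;
const rd_kafka_ConsumerGroupDescription_t **descs =
    rd_kafka_DescribeConsumerGroups_result_groups(res, &cnt);
for (i = 0; i < cnt; i++) {
        const rd_kafka_error_t *error =
            rd_kafka_ConsumerGroupDescription_error(descs[i]);
        printf("%s: assignor=%s state=%s%s\n",
               rd_kafka_ConsumerGroupDescription_group_id(descs[i]),
               rd_kafka_ConsumerGroupDescription_partition_assignor(descs[i]),
               rd_kafka_consumer_group_state_name(
                   rd_kafka_ConsumerGroupDescription_state(descs[i])),
               error ? " (error)" : "");
}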
+ * + * @param grpdesc The group description. + * + * @return The group coordinator. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the members count of \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The member count. + */ +RD_EXPORT +size_t rd_kafka_ConsumerGroupDescription_member_count( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets a member of \p grpdesc group. + * + * @param grpdesc The group description. + * @param idx The member idx. + * + * @return A member at index \p idx, or NULL if + * \p idx is out of range. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member( + const rd_kafka_ConsumerGroupDescription_t *grpdesc, + size_t idx); + +/** + * @brief Gets client id of \p member. + * + * @param member The group member. + * + * @return The client id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char *rd_kafka_MemberDescription_client_id( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets group instance id of \p member. + * + * @param member The group member. + * + * @return The group instance id, or NULL if not available. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char *rd_kafka_MemberDescription_group_instance_id( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets consumer id of \p member. + * + * @param member The group member. + * + * @return The consumer id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char *rd_kafka_MemberDescription_consumer_id( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets host of \p member. + * + * @param member The group member. + * + * @return The host. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const char * +rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets assignment of \p member. + * + * @param member The group member. + * + * @return The member assignment. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p member object. + */ +RD_EXPORT +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment( + const rd_kafka_MemberDescription_t *member); + +/** + * @brief Gets assigned partitions of a member \p assignment. + * + * @param assignment The group member assignment. + * + * @return The assigned partitions. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p assignment object. + */ +RD_EXPORT +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions( + const rd_kafka_MemberAssignment_t *assignment); + +/**@}*/ + +/** + * @name Admin API - DeleteGroups + * @brief Delete groups from cluster + * @{ + * + * + */ + +/*! Represents a group to be deleted. 
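/* Illustrative sketch (not librdkafka source): printing the coordinator and
 * the members of a single description, where grpdesc is one of the
 * descriptions obtained above. */
const rd_kafka_Node_t *coord =
    rd_kafka_ConsumerGroupDescription_coordinator(grpdesc);
if (coord)
        printf("coordinator: %d (%s:%u)\n", rd_kafka_Node_id(coord),
               rd_kafka_Node_host(coord), (unsigned)rd_kafka_Node_port(coord));

size_t m, member_cnt = rd_kafka_ConsumerGroupDescription_member_count(grpdesc);
for (m = 0; m < member_cnt; m++) {
        const rd_kafka_MemberDescription_t *member =
            rd_kafka_ConsumerGroupDescription_member(grpdesc, m);
        const rd_kafka_topic_partition_list_t *parts =
            rd_kafka_MemberAssignment_partitions(
                rd_kafka_MemberDescription_assignment(member));
        printf("  member %s (client %s on %s): %d assigned partition(s)\n",
               rd_kafka_MemberDescription_consumer_id(member),
               rd_kafka_MemberDescription_client_id(member),
               rd_kafka_MemberDescription_host(member),
               parts ? parts->cnt : 0);
}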
*/ +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t; + +/** + * @brief Create a new DeleteGroup object. This object is later passed to + * rd_kafka_DeleteGroups(). + * + * @param group Name of group to delete. + * + * @returns a new allocated DeleteGroup object. + * Use rd_kafka_DeleteGroup_destroy() to free object when done. + */ +RD_EXPORT +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group); + +/** + * @brief Destroy and free a DeleteGroup object previously created with + * rd_kafka_DeleteGroup_new() + */ +RD_EXPORT +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group); + +/** + * @brief Helper function to destroy all DeleteGroup objects in + * the \p del_groups array (of \p del_group_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void +rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt); + +/** + * @brief Delete groups from cluster as specified by the \p del_groups + * array of size \p del_group_cnt elements. + * + * @param rk Client instance. + * @param del_groups Array of groups to delete. + * @param del_group_cnt Number of elements in \p del_groups array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT + * + * @remark This function in called deleteConsumerGroups in the Java client. + */ +RD_EXPORT +void rd_kafka_DeleteGroups(rd_kafka_t *rk, + rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * DeleteGroups result type and methods + */ + +/** + * @brief Get an array of group results from a DeleteGroups result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( + const rd_kafka_DeleteGroups_result_t *result, + size_t *cntp); + +/**@}*/ + +/** + * @name Admin API - ListConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be listed. */ +typedef struct rd_kafka_ListConsumerGroupOffsets_s + rd_kafka_ListConsumerGroupOffsets_t; + +/** + * @brief Create a new ListConsumerGroupOffsets object. + * This object is later passed to rd_kafka_ListConsumerGroupOffsets(). + * + * @param group_id Consumer group id. + * @param partitions Partitions to list committed offsets for. + * Only the topic and partition fields are used. + * + * @returns a new allocated ListConsumerGroupOffsets object. + * Use rd_kafka_ListConsumerGroupOffsets_destroy() to free + * object when done. + */ +RD_EXPORT rd_kafka_ListConsumerGroupOffsets_t * +rd_kafka_ListConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a ListConsumerGroupOffsets object previously + * created with rd_kafka_ListConsumerGroupOffsets_new() + */ +RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy( + rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets); + +/** + * @brief Helper function to destroy all ListConsumerGroupOffsets objects in + * the \p list_grpoffsets array (of \p list_grpoffsets_cnt elements). + * The array itself is not freed. 
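/* Illustrative sketch (not librdkafka source): the DeleteGroups flow, using
 * a hypothetical group name and the pre-existing rd_kafka_group_result_t
 * accessors for the result. */
rd_kafka_DeleteGroup_t *del_groups[1];
del_groups[0] = rd_kafka_DeleteGroup_new("obsolete-group");

rd_kafka_DeleteGroups(rk, del_groups, 1, NULL /* default options */, rkqu);
rd_kafka_DeleteGroup_destroy_array(del_groups, 1);

/* ... once RD_KAFKA_EVENT_DELETEGROUPS_RESULT is received: ... */
const rd_kafka_DeleteGroups_result_t *res =
    rd_kafka_event_DeleteGroups_result(ev);
size_t i, cnt;
const rd_kafka_group_result_t **results =
    rd_kafka_DeleteGroups_result_groups(res, &cnt);
for (i = 0; i < cnt; i++) {
        const rd_kafka_error_t *error =
            rd_kafka_group_result_error(results[i]);
        printf("%s: %s\n", rd_kafka_group_result_name(results[i]),
               error ? rd_kafka_error_string(error) : "deleted");
}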
+ */ +RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy_array( + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffset_cnt); + +/** + * @brief List committed offsets for a set of partitions in a consumer + * group. + * + * @param rk Client instance. + * @param list_grpoffsets Array of group committed offsets to list. + * MUST only be one single element. + * @param list_grpoffsets_cnt Number of elements in \p list_grpoffsets array. + * MUST always be 1. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + * + * @remark The current implementation only supports one group per invocation. + */ +RD_EXPORT +void rd_kafka_ListConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * ListConsumerGroupOffsets result type and methods + */ + +/** + * @brief Get an array of results from a ListConsumerGroupOffsets result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT const rd_kafka_group_result_t ** +rd_kafka_ListConsumerGroupOffsets_result_groups( + const rd_kafka_ListConsumerGroupOffsets_result_t *result, + size_t *cntp); + + + +/**@}*/ + +/** + * @name Admin API - AlterConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be altered. */ +typedef struct rd_kafka_AlterConsumerGroupOffsets_s + rd_kafka_AlterConsumerGroupOffsets_t; + +/** + * @brief Create a new AlterConsumerGroupOffsets object. + * This object is later passed to rd_kafka_AlterConsumerGroupOffsets(). + * + * @param group_id Consumer group id. + * @param partitions Partitions to alter committed offsets for. + * Only the topic and partition fields are used. + * + * @returns a new allocated AlterConsumerGroupOffsets object. + * Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free + * object when done. + */ +RD_EXPORT rd_kafka_AlterConsumerGroupOffsets_t * +rd_kafka_AlterConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a AlterConsumerGroupOffsets object previously + * created with rd_kafka_AlterConsumerGroupOffsets_new() + */ +RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy( + rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets); + +/** + * @brief Helper function to destroy all AlterConsumerGroupOffsets objects in + * the \p alter_grpoffsets array (of \p alter_grpoffsets_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy_array( + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffset_cnt); + +/** + * @brief Alter committed offsets for a set of partitions in a consumer + * group. This will succeed at the partition level only if the group + * is not actively subscribed to the corresponding topic. + * + * @param rk Client instance. + * @param alter_grpoffsets Array of group committed offsets to alter. + * MUST only be one single element. 
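/* Illustrative sketch (not librdkafka source): listing committed offsets for
 * one group. Topic and group names are hypothetical; the partition list is
 * passed as a const pointer and is assumed to be copied by _new(), so it can
 * be destroyed right away. */
rd_kafka_topic_partition_list_t *partitions =
    rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(partitions, "example-topic", 0);

rd_kafka_ListConsumerGroupOffsets_t *req[1];
req[0] = rd_kafka_ListConsumerGroupOffsets_new("example-group", partitions);
rd_kafka_topic_partition_list_destroy(partitions);

/* Exactly one element per invocation, as noted above. */
rd_kafka_ListConsumerGroupOffsets(rk, req, 1, NULL /* default options */,
                                  rkqu);
rd_kafka_ListConsumerGroupOffsets_destroy_array(req, 1);
/* The result arrives as RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT and
 * is accessed with rd_kafka_ListConsumerGroupOffsets_result_groups(). */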
+ * @param alter_grpoffsets_cnt Number of elements in \p alter_grpoffsets array. + * MUST always be 1. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + * + * @remark The current implementation only supports one group per invocation. + */ +RD_EXPORT +void rd_kafka_AlterConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * AlterConsumerGroupOffsets result type and methods + */ + +/** + * @brief Get an array of results from a AlterConsumerGroupOffsets result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT const rd_kafka_group_result_t ** +rd_kafka_AlterConsumerGroupOffsets_result_groups( + const rd_kafka_AlterConsumerGroupOffsets_result_t *result, + size_t *cntp); + + + +/**@}*/ + +/** + * @name Admin API - DeleteConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be deleted. */ +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s + rd_kafka_DeleteConsumerGroupOffsets_t; + +/** + * @brief Create a new DeleteConsumerGroupOffsets object. + * This object is later passed to rd_kafka_DeleteConsumerGroupOffsets(). + * + * @param group Consumer group id. + * @param partitions Partitions to delete committed offsets for. + * Only the topic and partition fields are used. * * @returns a new allocated DeleteConsumerGroupOffsets object. * Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free @@ -7455,7 +8325,7 @@ size_t del_grpoffset_cnt); /** - * @brief Delete committed offsets for a set of partitions in a conusmer + * @brief Delete committed offsets for a set of partitions in a consumer * group. This will succeed at the partition level only if the group * is not actively subscribed to the corresponding topic. * @@ -7499,6 +8369,13 @@ const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp); +/**@}*/ + +/** + * @name Admin API - ACL operations + * @{ + */ + /** * @brief ACL Binding is used to create access control lists. * @@ -7520,11 +8397,6 @@ /** - * @name AclOperation - * @{ - */ - -/** * @enum rd_kafka_AclOperation_t * @brief Apache Kafka ACL operation types. */ @@ -7556,13 +8428,6 @@ RD_EXPORT const char * rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation); -/**@}*/ - -/** - * @name AclPermissionType - * @{ - */ - /** * @enum rd_kafka_AclPermissionType_t * @brief Apache Kafka ACL permission types. @@ -7582,8 +8447,6 @@ RD_EXPORT const char *rd_kafka_AclPermissionType_name( rd_kafka_AclPermissionType_t acl_permission_type); -/**@}*/ - /** * @brief Create a new AclBinding object. This object is later passed to * rd_kafka_CreateAcls(). @@ -7754,7 +8617,7 @@ rd_kafka_queue_t *rkqu); /** - * @section DescribeAcls - describe access control lists. + * DescribeAcls - describe access control lists. * * */ @@ -7790,7 +8653,7 @@ rd_kafka_queue_t *rkqu); /** - * @section DeleteAcls - delete access control lists. + * DeleteAcls - delete access control lists. 
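/* Illustrative sketch (not librdkafka source): altering a group's committed
 * offset for one partition. Names are hypothetical and it is assumed here
 * that the offset field of each entry carries the offset to commit; the
 * group must not be actively subscribed to the topic. */
rd_kafka_topic_partition_list_t *offsets =
    rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(offsets, "example-topic", 0)->offset = 42;

rd_kafka_AlterConsumerGroupOffsets_t *req[1];
req[0] = rd_kafka_AlterConsumerGroupOffsets_new("example-group", offsets);
rd_kafka_topic_partition_list_destroy(offsets);

rd_kafka_AlterConsumerGroupOffsets(rk, req, 1, NULL /* default options */,
                                   rkqu);
rd_kafka_AlterConsumerGroupOffsets_destroy_array(req, 1);
/* Per-partition outcomes arrive as
 * RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT and are accessed with
 * rd_kafka_AlterConsumerGroupOffsets_result_groups(). */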
* * */ @@ -8025,7 +8888,7 @@ * the global rd_kafka_fatal_error() code. * Fatal errors are raised by triggering the \c error_cb (see the * Fatal error chapter in INTRODUCTION.md for more information), and any - * sub-sequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL + * subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL * or have the fatal flag set (see rd_kafka_error_is_fatal()). * The originating fatal error code can be retrieved by calling * rd_kafka_fatal_error(). @@ -8085,9 +8948,15 @@ * @param timeout_ms The maximum time to block. On timeout the operation * may continue in the background, depending on state, * and it is okay to call init_transactions() again. + * If an infinite timeout (-1) is passed, the timeout will + * be adjusted to 2 * \c transaction.timeout.ms. * * @remark This function may block up to \p timeout_ms milliseconds. * + * @remark This call is resumable when a retriable timeout error is returned. + * Calling the function again will resume the operation that is + * progressing in the background. + * * @returns NULL on success or an error object on failure. * Check whether the returned error object permits retrying * by calling rd_kafka_error_is_retriable(), or whether a fatal @@ -8203,8 +9072,17 @@ * * @remark Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in * \p offsets will be ignored, if there are no valid offsets in - * \p offsets the function will return RD_KAFKA_RESP_ERR_NO_ERROR - * and no action will be taken. + * \p offsets the function will return NULL and no action will be taken. + * + * @remark This call is retriable but not resumable, which means a new request + * with a new set of provided offsets and group metadata will be + * sent to the transaction coordinator if the call is retried. + * + * @remark It is highly recommended to retry the call (upon retriable error) + * with identical \p offsets and \p cgmetadata parameters. + * Failure to do so risks inconsistent state between what is actually + * included in the transaction and what the application thinks is + * included in the transaction. * * @returns NULL on success or an error object on failure. * Check whether the returned error object permits retrying @@ -8225,9 +9103,7 @@ * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been * configured for the producer instance, * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance, - * or if the \p consumer_group_id or \p offsets are empty, - * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous - * rd_kafka_send_offsets_to_transaction() call is still in progress. + * or if the \p consumer_group_id or \p offsets are empty. * Other error codes not listed here may be returned, depending on * broker version. * @@ -8280,6 +9156,10 @@ * serve the event queue in a separate thread since rd_kafka_flush() * will not serve delivery reports in this mode. * + * @remark This call is resumable when a retriable timeout error is returned. + * Calling the function again will resume the operation that is + * progressing in the background. + * * @returns NULL on success or an error object on failure. * Check whether the returned error object permits retrying * by calling rd_kafka_error_is_retriable(), or whether an abortable @@ -8339,7 +9219,10 @@ * If the application has enabled RD_KAFKA_EVENT_DR it must * serve the event queue in a separate thread since rd_kafka_flush() * will not serve delivery reports in this mode. 
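/* Illustrative sketch (not librdkafka source): retrying init_transactions()
 * under the resumable-timeout semantics described above. rk is assumed to be
 * an already-created producer with transactional.id configured. */
rd_kafka_error_t *error;
while ((error = rd_kafka_init_transactions(rk, 30 * 1000 /* 30s */))) {
        if (rd_kafka_error_is_retriable(error)) {
                /* Timed out or otherwise retriable: the operation keeps
                 * progressing in the background, so simply call again. */
                rd_kafka_error_destroy(error);
                continue;
        }
        fprintf(stderr, "init_transactions() failed: %s\n",
                rd_kafka_error_string(error));
        rd_kafka_error_destroy(error);
        break;
}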
- + * + * @remark This call is resumable when a retriable timeout error is returned. + * Calling the function again will resume the operation that is + * progressing in the background. * * @returns NULL on success or an error object on failure. * Check whether the returned error object permits retrying diff -Nru librdkafka-1.9.2/src/rdkafka_idempotence.c librdkafka-2.0.2/src/rdkafka_idempotence.c --- librdkafka-1.9.2/src/rdkafka_idempotence.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_idempotence.c 2023-01-20 09:14:36.000000000 +0000 @@ -243,10 +243,22 @@ case RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT: /* Waiting for broker/coordinator to become available */ if (rd_kafka_is_transactional(rk)) { - /* Assert that a coordinator has been assigned by - * inspecting txn_curr_coord (the real broker) - * rather than txn_coord (the logical broker). */ - rd_assert(rk->rk_eos.txn_curr_coord); + /* Check that a proper coordinator broker has + * been assigned by inspecting txn_curr_coord + * (the real broker) rather than txn_coord + * (the logical broker). */ + if (!rk->rk_eos.txn_curr_coord) { + /* + * Can happen if the coordinator wasn't set or + * wasn't up initially and has been set to NULL + * after a COORDINATOR_NOT_AVAILABLE error in + * FindCoordinatorResponse. When the coordinator + * is known this FSM will be called again. + */ + rd_kafka_txn_coord_query( + rk, "Awaiting coordinator"); + return; + } rkb = rk->rk_eos.txn_coord; rd_kafka_broker_keep(rkb); @@ -355,6 +367,11 @@ /* Wait for outstanding ProduceRequests to finish * before bumping the current epoch. */ break; + + case RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT: + /* Wait for txnmgr to abort its current transaction + * and then trigger a drain & reset or bump. */ + break; } } @@ -433,6 +450,8 @@ err == RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)) rd_kafka_txn_coord_set(rk, NULL, "%s", errstr); + /* This error code is read by init_transactions() for propagation + * to the application. */ rk->rk_eos.txn_init_err = err; rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID); @@ -597,41 +616,74 @@ * @brief Schedule an epoch bump when the local ProduceRequest queues * have been fully drained. * - * The PID is not bumped until the queues are fully drained. + * The PID is not bumped until the queues are fully drained and the current + * transaction is aborted (if any). * + * @param allow_txn_abort If this is a transactional producer and this flag is + * true then we trigger an abortable txn error to abort + * the current transaction first. The txnmgr will later + * call us back with this flag set to false to go ahead + * with the epoch bump. * @param fmt is a human-readable reason for the bump * * * @locality any * @locks none */ -void rd_kafka_idemp_drain_epoch_bump(rd_kafka_t *rk, - rd_kafka_resp_err_t err, - const char *fmt, - ...) { +void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk, + rd_bool_t allow_txn_abort, + rd_kafka_resp_err_t err, + const char *fmt, + ...) 
{ va_list ap; char buf[256]; + rd_bool_t requires_txn_abort = + allow_txn_abort && rd_kafka_is_transactional(rk); va_start(ap, fmt); rd_vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); rd_kafka_wrlock(rk); - rd_kafka_dbg(rk, EOS, "DRAIN", - "Beginning partition drain for %s epoch bump " - "for %d partition(s) with in-flight requests: %s", - rd_kafka_pid2str(rk->rk_eos.pid), - rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), buf); - rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_BUMP); + + + if (requires_txn_abort) { + rd_kafka_dbg(rk, EOS, "DRAIN", + "Need transaction abort before beginning " + "partition drain in state %s for %s epoch bump " + "for %d partition(s) with in-flight requests: %s", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + rd_kafka_pid2str(rk->rk_eos.pid), + rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), + buf); + rd_kafka_idemp_set_state(rk, + RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT); + + } else { + rd_kafka_dbg(rk, EOS, "DRAIN", + "Beginning partition drain in state %s " + "for %s epoch bump " + "for %d partition(s) with in-flight requests: %s", + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + rd_kafka_pid2str(rk->rk_eos.pid), + rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), + buf); + + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_BUMP); + } + rd_kafka_wrunlock(rk); - /* Transactions: bumping the epoch requires the current transaction - * to be aborted. */ - if (rd_kafka_is_transactional(rk)) + if (requires_txn_abort) { + /* Transactions: bumping the epoch requires the current + * transaction to be aborted first. */ rd_kafka_txn_set_abortable_error_with_bump(rk, err, "%s", buf); - /* Check right away if the drain could be done. */ - rd_kafka_idemp_check_drain_done(rk); + } else { + /* Idempotent producer: check right away if the drain could + * be done. */ + rd_kafka_idemp_check_drain_done(rk); + } } /** @@ -698,7 +750,10 @@ return; rd_kafka_wrlock(rk); - rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID); + /* Don't restart PID acquisition if there's already an outstanding + * request. */ + if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_PID) + rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID); rd_kafka_wrunlock(rk); /* Schedule request timer */ diff -Nru librdkafka-1.9.2/src/rdkafka_idempotence.h librdkafka-2.0.2/src/rdkafka_idempotence.h --- librdkafka-1.9.2/src/rdkafka_idempotence.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_idempotence.h 2023-01-20 09:14:36.000000000 +0000 @@ -41,6 +41,9 @@ /** * @brief Get the current PID if state permits. * + * @param bumpable If true, return PID even if it may only be used for + * bumping the Epoch. + * * @returns If there is no valid PID or the state * does not permit further PID usage (such as when draining) * then an invalid PID is returned. 
@@ -49,13 +52,18 @@ * @locks none */ static RD_UNUSED RD_INLINE rd_kafka_pid_t -rd_kafka_idemp_get_pid0(rd_kafka_t *rk, rd_bool_t do_lock) { +rd_kafka_idemp_get_pid0(rd_kafka_t *rk, + rd_dolock_t do_lock, + rd_bool_t bumpable) { rd_kafka_pid_t pid; if (do_lock) rd_kafka_rdlock(rk); if (likely(rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED)) pid = rk->rk_eos.pid; + else if (unlikely(bumpable && rk->rk_eos.idemp_state == + RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT)) + pid = rk->rk_eos.pid; else rd_kafka_pid_reset(&pid); if (do_lock) @@ -64,7 +72,8 @@ return pid; } -#define rd_kafka_idemp_get_pid(rk) rd_kafka_idemp_get_pid0(rk, rd_true /*lock*/) +#define rd_kafka_idemp_get_pid(rk) \ + rd_kafka_idemp_get_pid0(rk, RD_DO_LOCK, rd_false) void rd_kafka_idemp_set_state(rd_kafka_t *rk, rd_kafka_idemp_state_t new_state); void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb, @@ -73,10 +82,14 @@ const rd_kafka_pid_t pid); void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk); void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason); -void rd_kafka_idemp_drain_epoch_bump(rd_kafka_t *rk, - rd_kafka_resp_err_t err, - const char *fmt, - ...) RD_FORMAT(printf, 3, 4); +void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk, + rd_bool_t allow_txn_abort, + rd_kafka_resp_err_t err, + const char *fmt, + ...) RD_FORMAT(printf, 4, 5); +#define rd_kafka_idemp_drain_epoch_bump(rk, err, ...) \ + rd_kafka_idemp_drain_epoch_bump0(rk, rd_true, err, __VA_ARGS__) + void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason); void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk, rd_kafka_toppar_t *rktp); diff -Nru librdkafka-1.9.2/src/rdkafka_interceptor.c librdkafka-2.0.2/src/rdkafka_interceptor.c --- librdkafka-1.9.2/src/rdkafka_interceptor.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_interceptor.c 2023-01-20 09:14:36.000000000 +0000 @@ -49,6 +49,8 @@ *on_response_received; rd_kafka_interceptor_f_on_thread_start_t *on_thread_start; rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit; + rd_kafka_interceptor_f_on_broker_state_change_t + *on_broker_state_change; void *generic; /* For easy assignment */ } u; @@ -174,6 +176,7 @@ rd_list_destroy(&conf->interceptors.on_response_received); rd_list_destroy(&conf->interceptors.on_thread_start); rd_list_destroy(&conf->interceptors.on_thread_exit); + rd_list_destroy(&conf->interceptors.on_broker_state_change); /* Interceptor config */ rd_list_destroy(&conf->interceptors.config); @@ -224,6 +227,9 @@ rd_list_init(&conf->interceptors.on_thread_exit, 0, rd_kafka_interceptor_method_destroy) ->rl_flags |= RD_LIST_F_UNIQUE; + rd_list_init(&conf->interceptors.on_broker_state_change, 0, + rd_kafka_interceptor_method_destroy) + ->rl_flags |= RD_LIST_F_UNIQUE; /* Interceptor config */ rd_list_init(&conf->interceptors.config, 0, @@ -618,6 +624,34 @@ } +/** + * @brief Call interceptor on_broker_state_change methods. + * @locality any. 
+ */ +void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, + int port, + const char *state) { + rd_kafka_interceptor_method_t *method; + int i; + + RD_LIST_FOREACH(method, + &rk->rk_conf.interceptors.on_broker_state_change, i) { + rd_kafka_resp_err_t ic_err; + + ic_err = method->u.on_broker_state_change( + rk, broker_id, secproto, name, port, state, + method->ic_opaque); + if (unlikely(ic_err)) + rd_kafka_interceptor_failed(rk, method, + "on_broker_state_change", + ic_err, NULL, NULL); + } +} + + /** * @name Public API (backend) @@ -771,3 +805,15 @@ &rk->rk_conf.interceptors.on_thread_exit, ic_name, (void *)on_thread_exit, ic_opaque); } + + +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, + void *ic_opaque) { + assert(!rk->rk_initialized); + return rd_kafka_interceptor_method_add( + &rk->rk_conf.interceptors.on_broker_state_change, ic_name, + (void *)on_broker_state_change, ic_opaque); +} diff -Nru librdkafka-1.9.2/src/rdkafka_interceptor.h librdkafka-2.0.2/src/rdkafka_interceptor.h --- librdkafka-1.9.2/src/rdkafka_interceptor.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_interceptor.h 2023-01-20 09:14:36.000000000 +0000 @@ -82,6 +82,13 @@ void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type); +void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, + int port, + const char *state); + void rd_kafka_conf_interceptor_ctor(int scope, void *pconf); void rd_kafka_conf_interceptor_dtor(int scope, void *pconf); void rd_kafka_conf_interceptor_copy(int scope, diff -Nru librdkafka-1.9.2/src/rdkafka_int.h librdkafka-2.0.2/src/rdkafka_int.h --- librdkafka-1.9.2/src/rdkafka_int.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_int.h 2023-01-20 09:14:36.000000000 +0000 @@ -105,6 +105,7 @@ #define RD_KAFKAP_BROKERS_MAX 10000 #define RD_KAFKAP_TOPICS_MAX 1000000 #define RD_KAFKAP_PARTITIONS_MAX 100000 +#define RD_KAFKAP_GROUPS_MAX 100000 #define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0) @@ -123,14 +124,17 @@ * become available. */ RD_KAFKA_IDEMP_STATE_WAIT_PID, /**< PID requested, waiting for reply */ RD_KAFKA_IDEMP_STATE_ASSIGNED, /**< New PID assigned */ - RD_KAFKA_IDEMP_STATE_DRAIN_RESET, /**< Wait for outstanding - * ProduceRequests to finish - * before resetting and - * re-requesting a new PID. */ - RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding - * ProduceRequests to finish - * before bumping the current - * epoch. */ + RD_KAFKA_IDEMP_STATE_DRAIN_RESET, /**< Wait for outstanding + * ProduceRequests to finish + * before resetting and + * re-requesting a new PID. */ + RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding + * ProduceRequests to finish + * before bumping the current + * epoch. */ + RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT, /**< Wait for transaction abort + * to finish and trigger a + * drain and reset or bump. 
*/ } rd_kafka_idemp_state_t; /** @@ -140,7 +144,7 @@ rd_kafka_idemp_state2str(rd_kafka_idemp_state_t state) { static const char *names[] = { "Init", "Terminate", "FatalError", "RequestPID", "WaitTransport", - "WaitPID", "Assigned", "DrainReset", "DrainBump"}; + "WaitPID", "Assigned", "DrainReset", "DrainBump", "WaitTxnAbort"}; return names[state]; } @@ -169,6 +173,8 @@ /**< Transaction successfully committed but application has not made * a successful commit_transaction() call yet. */ RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED, + /**< begin_transaction() has been called. */ + RD_KAFKA_TXN_STATE_BEGIN_ABORT, /**< abort_transaction() has been called. */ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, /**< Transaction successfully aborted but application has not made @@ -194,6 +200,7 @@ "BeginCommit", "CommittingTransaction", "CommitNotAcked", + "BeginAbort", "AbortingTransaction", "AbortedNotAcked", "AbortableError", @@ -397,55 +404,34 @@ * Only one transactional API call is allowed at any time. * Protected by the rk_lock. */ struct { - char name[64]; /**< API name, e.g., - * SendOffsetsToTransaction */ - rd_kafka_timer_t tmr; /**< Timeout timer, the timeout - * is specified by the app. */ - - int flags; /**< Flags */ -#define RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT \ - 0x1 /**< Set state to abortable \ - * error on timeout, \ - * i.e., fail the txn, \ - * and set txn_requires_abort \ - * on the returned error. \ - */ -#define RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT \ - 0x2 /**< Set retriable flag \ - * on the error \ - * on timeout. */ -#define RD_KAFKA_TXN_CURR_API_F_FOR_REUSE \ - 0x4 /**< Do not reset the \ - * current API when it \ - * completes successfully \ - * Instead keep it alive \ - * and allow reuse with \ - * .._F_REUSE, blocking \ - * any non-F_REUSE \ - * curr API calls. */ -#define RD_KAFKA_TXN_CURR_API_F_REUSE \ - 0x8 /**< Reuse/continue with \ - * current API state. \ - * This is used for \ - * multi-stage APIs, \ - * such as txn commit. */ + char name[64]; /**< API name, e.g., + * send_offsets_to_transaction. + * This is used to make sure + * conflicting APIs are not + * called simultaneously. */ + rd_bool_t calling; /**< API is being actively called. + * I.e., application is blocking + * on a txn API call. + * This is used to make sure + * no concurrent API calls are + * being made. */ + rd_kafka_error_t *error; /**< Last error from background + * processing. This is only + * set if the application's + * API call timed out. + * It will be returned on + * the next call. */ + rd_bool_t has_result; /**< Indicates whether an API + * result (possibly + * intermediate) has been set. + */ + cnd_t cnd; /**< Application thread will + * block on this cnd waiting + * for a result to be set. */ + mtx_t lock; /**< Protects all fields of + * txn_curr_api. */ } txn_curr_api; - /**< Copy (and reference) of the original init_transactions(), - * but out-lives the timeout of the curr API. - * This is used as the reply queue for when the - * black box idempotent producer has acquired the - * initial PID (or fails to do so). - * Since that acquisition may take longer than the - * init_transactions() API timeout this extra reference - * needs to be kept around. - * If the originating init_transactions() call has timed - * out and returned this queue reference simply points - * to a disabled queue that will discard any ops enqueued. - * - * @locks rk_lock - */ - rd_kafka_q_t *txn_init_rkq; int txn_req_cnt; /**< Number of transaction * requests sent. 
@@ -652,9 +638,11 @@ return RD_KAFKA_RESP_ERR_NO_ERROR; mtx_lock(&rk->rk_curr_msgs.lock); - while (unlikely(rk->rk_curr_msgs.cnt + cnt > rk->rk_curr_msgs.max_cnt || - (unsigned long long)(rk->rk_curr_msgs.size + size) > - (unsigned long long)rk->rk_curr_msgs.max_size)) { + while ( + unlikely((rk->rk_curr_msgs.max_cnt > 0 && + rk->rk_curr_msgs.cnt + cnt > rk->rk_curr_msgs.max_cnt) || + (unsigned long long)(rk->rk_curr_msgs.size + size) > + (unsigned long long)rk->rk_curr_msgs.max_size)) { if (!block) { mtx_unlock(&rk->rk_curr_msgs.lock); return RD_KAFKA_RESP_ERR__QUEUE_FULL; diff -Nru librdkafka-1.9.2/src/rdkafka_mock.c librdkafka-2.0.2/src/rdkafka_mock.c --- librdkafka-1.9.2/src/rdkafka_mock.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_mock.c 2023-01-20 09:14:36.000000000 +0000 @@ -1877,6 +1877,34 @@ } +rd_kafka_resp_err_t +rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int16_t ApiKey, + size_t *cntp) { + rd_kafka_mock_broker_t *mrkb; + rd_kafka_mock_error_stack_t *errstack; + + if (!mcluster || !cntp) + return RD_KAFKA_RESP_ERR__INVALID_ARG; + + mtx_lock(&mcluster->lock); + + if (!(mrkb = rd_kafka_mock_broker_find(mcluster, broker_id))) { + mtx_unlock(&mcluster->lock); + return RD_KAFKA_RESP_ERR__UNKNOWN_BROKER; + } + + if ((errstack = + rd_kafka_mock_error_stack_find(&mrkb->errstacks, ApiKey))) + *cntp = errstack->cnt; + + mtx_unlock(&mcluster->lock); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + + void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err) { diff -Nru librdkafka-1.9.2/src/rdkafka_mock.h librdkafka-2.0.2/src/rdkafka_mock.h --- librdkafka-1.9.2/src/rdkafka_mock.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_mock.h 2023-01-20 09:14:36.000000000 +0000 @@ -190,6 +190,27 @@ ...); + +/** + * @brief Get the count of errors in the broker's error stack for + * the given \p ApiKey. + * + * @param mcluster the mock cluster. + * @param broker_id id of the broker in the cluster. + * @param ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0). + * @param cntp pointer for receiving the count. + * + * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR if the count was retrieved, + * \c RD_KAFKA_RESP_ERR__UNKNOWN_BROKER if there was no broker with this id, + * \c RD_KAFKA_RESP_ERR__INVALID_ARG if some of the parameters are not valid. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, + int32_t broker_id, + int16_t ApiKey, + size_t *cntp); + + /** * @brief Set the topic error to return in protocol requests. 
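The exported rd_kafka_mock_broker_error_stack_cnt() lets test code check how many injected errors are still pending for a given broker and ApiKey. A usage sketch; the include path and the producer/mock-cluster plumbing are illustrative and error handling is omitted:

#include <stdio.h>
#include <librdkafka/rdkafka.h>
#include <librdkafka/rdkafka_mock.h>   /* in-tree builds use "rdkafka_mock.h" */

int main(void) {
        char errstr[256];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk =
            rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        rd_kafka_mock_cluster_t *mcluster =
            rd_kafka_mock_cluster_new(rk, 3 /* broker count */);
        size_t cnt = 0;

        /* ApiKey 0 = Produce: how many injected Produce errors are still
         * queued on broker 1's error stack? */
        if (!rd_kafka_mock_broker_error_stack_cnt(mcluster, 1, 0, &cnt))
                printf("broker 1: %zu pending Produce errors\n", cnt);

        rd_kafka_mock_cluster_destroy(mcluster);
        rd_kafka_destroy(rk);
        return 0;
}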
* diff -Nru librdkafka-1.9.2/src/rdkafka_op.c librdkafka-2.0.2/src/rdkafka_op.c --- librdkafka-1.9.2/src/rdkafka_op.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_op.c 2023-01-20 09:14:36.000000000 +0000 @@ -43,50 +43,57 @@ const char *rd_kafka_op2str(rd_kafka_op_type_t type) { int skiplen = 6; static const char *names[RD_KAFKA_OP__END] = { - [RD_KAFKA_OP_NONE] = "REPLY:NONE", - [RD_KAFKA_OP_FETCH] = "REPLY:FETCH", - [RD_KAFKA_OP_ERR] = "REPLY:ERR", - [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR", - [RD_KAFKA_OP_DR] = "REPLY:DR", - [RD_KAFKA_OP_STATS] = "REPLY:STATS", - [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT", - [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE", - [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF", - [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF", - [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY", - [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START", - [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP", - [RD_KAFKA_OP_SEEK] = "REPLY:SEEK", - [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE", - [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH", - [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN", - [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE", - [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE", - [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE", - [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY", - [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE", - [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN", - [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION", - [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT", - [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE", - [RD_KAFKA_OP_NAME] = "REPLY:NAME", - [RD_KAFKA_OP_CG_METADATA] = "REPLY:CG_METADATA", - [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET", - [RD_KAFKA_OP_METADATA] = "REPLY:METADATA", - [RD_KAFKA_OP_LOG] = "REPLY:LOG", - [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP", - [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS", - [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS", - [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS", - [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS", - [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS", - [RD_KAFKA_OP_DELETERECORDS] = "REPLY:DELETERECORDS", - [RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS", + [RD_KAFKA_OP_NONE] = "REPLY:NONE", + [RD_KAFKA_OP_FETCH] = "REPLY:FETCH", + [RD_KAFKA_OP_ERR] = "REPLY:ERR", + [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR", + [RD_KAFKA_OP_DR] = "REPLY:DR", + [RD_KAFKA_OP_STATS] = "REPLY:STATS", + [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT", + [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE", + [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF", + [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF", + [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY", + [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START", + [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP", + [RD_KAFKA_OP_SEEK] = "REPLY:SEEK", + [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE", + [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH", + [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN", + [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE", + [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE", + [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE", + [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY", + [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE", + [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN", + [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION", + [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT", + [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE", + [RD_KAFKA_OP_NAME] = "REPLY:NAME", + [RD_KAFKA_OP_CG_METADATA] = "REPLY:CG_METADATA", + 
[RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET", + [RD_KAFKA_OP_METADATA] = "REPLY:METADATA", + [RD_KAFKA_OP_LOG] = "REPLY:LOG", + [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP", + [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS", + [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS", + [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS", + [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS", + [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS", + [RD_KAFKA_OP_DELETERECORDS] = "REPLY:DELETERECORDS", + [RD_KAFKA_OP_LISTCONSUMERGROUPS] = "REPLY:LISTCONSUMERGROUPS", + [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] = + "REPLY:DESCRIBECONSUMERGROUPS", + [RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS", [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = "REPLY:DELETECONSUMERGROUPOFFSETS", - [RD_KAFKA_OP_CREATEACLS] = "REPLY:CREATEACLS", - [RD_KAFKA_OP_DESCRIBEACLS] = "REPLY:DESCRIBEACLS", - [RD_KAFKA_OP_DELETEACLS] = "REPLY:DELETEACLS", + [RD_KAFKA_OP_CREATEACLS] = "REPLY:CREATEACLS", + [RD_KAFKA_OP_DESCRIBEACLS] = "REPLY:DESCRIBEACLS", + [RD_KAFKA_OP_DELETEACLS] = "REPLY:DELETEACLS", + [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] = + "REPLY:ALTERCONSUMERGROUPOFFSETS", + [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] = + "REPLY:LISTCONSUMERGROUPOFFSETS", [RD_KAFKA_OP_ADMIN_FANOUT] = "REPLY:ADMIN_FANOUT", [RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT", [RD_KAFKA_OP_PURGE] = "REPLY:PURGE", @@ -186,49 +193,56 @@ * if we forgot to add an op type to \ * this list. */ static const size_t op2size[RD_KAFKA_OP__END] = { - [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch), - [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err), - [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err), - [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr), - [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats), - [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit), - [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node), - [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf), - [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start), - [RD_KAFKA_OP_FETCH_STOP] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start), - [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause), - [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch), - [RD_KAFKA_OP_PARTITION_JOIN] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_PARTITION_LEAVE] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance), - [RD_KAFKA_OP_TERMINATE] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_COORD_QUERY] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe), - [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign), - [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe), - [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign), - [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle), - [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name), - [RD_KAFKA_OP_CG_METADATA] = sizeof(rko->rko_u.cg_metadata), - [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset), - [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata), - [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log), - [RD_KAFKA_OP_WAKEUP] = _RD_KAFKA_OP_EMPTY, - [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_CREATEPARTITIONS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DESCRIBECONFIGS] = sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETERECORDS] = 
sizeof(rko->rko_u.admin_request), - [RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch), + [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err), + [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err), + [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr), + [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats), + [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit), + [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node), + [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf), + [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start), + [RD_KAFKA_OP_FETCH_STOP] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start), + [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause), + [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch), + [RD_KAFKA_OP_PARTITION_JOIN] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_PARTITION_LEAVE] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance), + [RD_KAFKA_OP_TERMINATE] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_COORD_QUERY] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe), + [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign), + [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe), + [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign), + [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle), + [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name), + [RD_KAFKA_OP_CG_METADATA] = sizeof(rko->rko_u.cg_metadata), + [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset), + [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata), + [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log), + [RD_KAFKA_OP_WAKEUP] = _RD_KAFKA_OP_EMPTY, + [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_CREATEPARTITIONS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBECONFIGS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETERECORDS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_LISTCONSUMERGROUPS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] = sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_CREATEACLS] = sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_DESCRIBEACLS] = sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_DELETEACLS] = sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] = + sizeof(rko->rko_u.admin_request), + [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] = + sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_ADMIN_FANOUT] = sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result), [RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge), @@ -375,13 +389,22 @@ case RD_KAFKA_OP_ALTERCONFIGS: case RD_KAFKA_OP_DESCRIBECONFIGS: case RD_KAFKA_OP_DELETERECORDS: + case RD_KAFKA_OP_LISTCONSUMERGROUPS: + case RD_KAFKA_OP_DESCRIBECONSUMERGROUPS: case RD_KAFKA_OP_DELETEGROUPS: case RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS: case RD_KAFKA_OP_CREATEACLS: case RD_KAFKA_OP_DESCRIBEACLS: case RD_KAFKA_OP_DELETEACLS: + case RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS: + case RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS: rd_kafka_replyq_destroy(&rko->rko_u.admin_request.replyq); rd_list_destroy(&rko->rko_u.admin_request.args); + if 
(rko->rko_u.admin_request.options.match_consumer_group_states + .u.PTR) { + rd_list_destroy(rko->rko_u.admin_request.options + .match_consumer_group_states.u.PTR); + } rd_assert(!rko->rko_u.admin_request.fanout_parent); RD_IF_FREE(rko->rko_u.admin_request.coordkey, rd_free); break; diff -Nru librdkafka-1.9.2/src/rdkafka_op.h librdkafka-2.0.2/src/rdkafka_op.h --- librdkafka-1.9.2/src/rdkafka_op.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_op.h 2023-01-20 09:14:36.000000000 +0000 @@ -131,24 +131,36 @@ * u.admin_request*/ RD_KAFKA_OP_DELETERECORDS, /**< Admin: DeleteRecords: * u.admin_request*/ - RD_KAFKA_OP_DELETEGROUPS, /**< Admin: DeleteGroups: u.admin_request*/ + RD_KAFKA_OP_LISTCONSUMERGROUPS, /**< Admin: + * ListConsumerGroups + * u.admin_request */ + RD_KAFKA_OP_DESCRIBECONSUMERGROUPS, /**< Admin: + * DescribeConsumerGroups + * u.admin_request */ + RD_KAFKA_OP_DELETEGROUPS, /**< Admin: DeleteGroups: u.admin_request*/ RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, /**< Admin: * DeleteConsumerGroupOffsets * u.admin_request */ RD_KAFKA_OP_CREATEACLS, /**< Admin: CreateAcls: u.admin_request*/ RD_KAFKA_OP_DESCRIBEACLS, /**< Admin: DescribeAcls: u.admin_request*/ RD_KAFKA_OP_DELETEACLS, /**< Admin: DeleteAcls: u.admin_request*/ - RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */ - RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */ - RD_KAFKA_OP_PURGE, /**< Purge queues */ - RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */ - RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */ - RD_KAFKA_OP_MOCK, /**< Mock cluster command */ - RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */ - RD_KAFKA_OP_TXN, /**< Transaction command */ - RD_KAFKA_OP_GET_REBALANCE_PROTOCOL, /**< Get rebalance protocol */ - RD_KAFKA_OP_LEADERS, /**< Partition leader query */ - RD_KAFKA_OP_BARRIER, /**< Version barrier bump */ + RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS, /**< Admin: + * AlterConsumerGroupOffsets + * u.admin_request */ + RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS, /**< Admin: + * ListConsumerGroupOffsets + * u.admin_request */ + RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */ + RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */ + RD_KAFKA_OP_PURGE, /**< Purge queues */ + RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */ + RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */ + RD_KAFKA_OP_MOCK, /**< Mock cluster command */ + RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */ + RD_KAFKA_OP_TXN, /**< Transaction command */ + RD_KAFKA_OP_GET_REBALANCE_PROTOCOL, /**< Get rebalance protocol */ + RD_KAFKA_OP_LEADERS, /**< Partition leader query */ + RD_KAFKA_OP_BARRIER, /**< Version barrier bump */ RD_KAFKA_OP__END } rd_kafka_op_type_t; @@ -290,7 +302,7 @@ struct { rd_kafka_topic_partition_list_t *partitions; /** Require stable (txn-commited) offsets */ - rd_bool_t require_stable; + rd_bool_t require_stable_offsets; int do_free; /* free .partitions on destroy() */ } offset_fetch; @@ -433,6 +445,7 @@ RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS, RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST, RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE, + RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST, } state; int32_t broker_id; /**< Requested broker id to diff -Nru librdkafka-1.9.2/src/rdkafka_partition.c librdkafka-2.0.2/src/rdkafka_partition.c --- librdkafka-1.9.2/src/rdkafka_partition.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_partition.c 2023-01-20 09:14:36.000000000 +0000 @@ -31,6 +31,7 @@ #include "rdkafka_request.h" #include "rdkafka_offset.h" #include 
"rdkafka_partition.h" +#include "rdkafka_fetcher.h" #include "rdregex.h" #include "rdports.h" /* rd_qsort_r() */ @@ -192,6 +193,8 @@ rktp->rktp_op_version = version; rko = rd_kafka_op_new(RD_KAFKA_OP_BARRIER); rko->rko_version = version; + rko->rko_prio = RD_KAFKA_PRIO_FLASH; + rko->rko_rktp = rd_kafka_toppar_keep(rktp); rd_kafka_q_enq(rktp->rktp_fetchq, rko); } @@ -1282,7 +1285,7 @@ rko->rko_replyq = replyq; rko->rko_u.offset_fetch.partitions = part; - rko->rko_u.offset_fetch.require_stable = + rko->rko_u.offset_fetch.require_stable_offsets = rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED; rko->rko_u.offset_fetch.do_free = 1; @@ -1930,171 +1933,6 @@ /** - * @brief Decide whether this toppar should be on the fetch list or not. - * - * Also: - * - update toppar's op version (for broker thread's copy) - * - finalize statistics (move rktp_offsets to rktp_offsets_fin) - * - * @returns the partition's Fetch backoff timestamp, or 0 if no backoff. - * - * @locality broker thread - * @locks none - */ -rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int force_remove) { - int should_fetch = 1; - const char *reason = ""; - int32_t version; - rd_ts_t ts_backoff = 0; - rd_bool_t lease_expired = rd_false; - - rd_kafka_toppar_lock(rktp); - - /* Check for preferred replica lease expiry */ - lease_expired = rktp->rktp_leader_id != rktp->rktp_broker_id && - rd_interval(&rktp->rktp_lease_intvl, - 5 * 60 * 1000 * 1000 /*5 minutes*/, 0) > 0; - if (lease_expired) { - /* delete_to_leader() requires no locks to be held */ - rd_kafka_toppar_unlock(rktp); - rd_kafka_toppar_delegate_to_leader(rktp); - rd_kafka_toppar_lock(rktp); - - reason = "preferred replica lease expired"; - should_fetch = 0; - goto done; - } - - /* Forced removal from fetch list */ - if (unlikely(force_remove)) { - reason = "forced removal"; - should_fetch = 0; - goto done; - } - - if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) { - reason = "partition removed"; - should_fetch = 0; - goto done; - } - - /* Skip toppars not in active fetch state */ - if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) { - reason = "not in active fetch state"; - should_fetch = 0; - goto done; - } - - /* Update broker thread's fetch op version */ - version = rktp->rktp_op_version; - if (version > rktp->rktp_fetch_version || - rktp->rktp_next_offset != rktp->rktp_last_next_offset || - rktp->rktp_offsets.fetch_offset == RD_KAFKA_OFFSET_INVALID) { - /* New version barrier, something was modified from the - * control plane. Reset and start over. - * Alternatively only the next_offset changed but not the - * barrier, which is the case when automatically triggering - * offset.reset (such as on PARTITION_EOF or - * OFFSET_OUT_OF_RANGE). */ - - rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC", - "Topic %s [%" PRId32 - "]: fetch decide: " - "updating to version %d (was %d) at " - "offset %" PRId64 " (was %" PRId64 ")", - rktp->rktp_rkt->rkt_topic->str, - rktp->rktp_partition, version, - rktp->rktp_fetch_version, rktp->rktp_next_offset, - rktp->rktp_offsets.fetch_offset); - - rd_kafka_offset_stats_reset(&rktp->rktp_offsets); - - /* New start offset */ - rktp->rktp_offsets.fetch_offset = rktp->rktp_next_offset; - rktp->rktp_last_next_offset = rktp->rktp_next_offset; - - rktp->rktp_fetch_version = version; - - /* Clear last error to propagate new fetch - * errors if encountered. 
*/ - rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR; - - rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp, - version); - } - - - if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) { - should_fetch = 0; - reason = "paused"; - - } else if (RD_KAFKA_OFFSET_IS_LOGICAL(rktp->rktp_next_offset)) { - should_fetch = 0; - reason = "no concrete offset"; - - } else if (rd_kafka_q_len(rktp->rktp_fetchq) >= - rkb->rkb_rk->rk_conf.queued_min_msgs) { - /* Skip toppars who's local message queue is already above - * the lower threshold. */ - reason = "queued.min.messages exceeded"; - should_fetch = 0; - - } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >= - rkb->rkb_rk->rk_conf.queued_max_msg_bytes) { - reason = "queued.max.messages.kbytes exceeded"; - should_fetch = 0; - - } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) { - reason = "fetch backed off"; - ts_backoff = rktp->rktp_ts_fetch_backoff; - should_fetch = 0; - } - -done: - /* Copy offset stats to finalized place holder. */ - rktp->rktp_offsets_fin = rktp->rktp_offsets; - - if (rktp->rktp_fetch != should_fetch) { - rd_rkb_dbg( - rkb, FETCH, "FETCH", - "Topic %s [%" PRId32 - "] in state %s at offset %s " - "(%d/%d msgs, %" PRId64 - "/%d kb queued, " - "opv %" PRId32 ") is %s%s", - rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, - rd_kafka_fetch_states[rktp->rktp_fetch_state], - rd_kafka_offset2str(rktp->rktp_next_offset), - rd_kafka_q_len(rktp->rktp_fetchq), - rkb->rkb_rk->rk_conf.queued_min_msgs, - rd_kafka_q_size(rktp->rktp_fetchq) / 1024, - rkb->rkb_rk->rk_conf.queued_max_msg_kbytes, - rktp->rktp_fetch_version, - should_fetch ? "fetchable" : "not fetchable: ", reason); - - if (should_fetch) { - rd_dassert(rktp->rktp_fetch_version > 0); - rd_kafka_broker_active_toppar_add( - rkb, rktp, *reason ? reason : "fetchable"); - } else { - rd_kafka_broker_active_toppar_del(rkb, rktp, reason); - } - } - - rd_kafka_toppar_unlock(rktp); - - /* Non-fetching partitions will have an - * indefinate backoff, unless explicitly specified. */ - if (!should_fetch && !ts_backoff) - ts_backoff = RD_TS_MAX; - - return ts_backoff; -} - - -/** * @brief Serve a toppar in a consumer broker thread. * This is considered the fast path and should be minimal, * mostly focusing on fetch related mechanisms. diff -Nru librdkafka-1.9.2/src/rdkafka_partition.h librdkafka-2.0.2/src/rdkafka_partition.h --- librdkafka-1.9.2/src/rdkafka_partition.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_partition.h 2023-01-20 09:14:36.000000000 +0000 @@ -524,12 +524,6 @@ -rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp, - rd_kafka_broker_t *rkb, - int force_remove); - - - rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb, rd_kafka_toppar_t *rktp); diff -Nru librdkafka-1.9.2/src/rdkafka_queue.c librdkafka-2.0.2/src/rdkafka_queue.c --- librdkafka-1.9.2/src/rdkafka_queue.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_queue.c 2023-01-20 09:14:36.000000000 +0000 @@ -539,7 +539,8 @@ * * @locality Any thread. */ -static size_t rd_kafka_purge_outdated_messages(int32_t version, +static size_t rd_kafka_purge_outdated_messages(rd_kafka_toppar_t *rktp, + int32_t version, rd_kafka_message_t **rkmessages, size_t cnt) { size_t valid_count = 0; @@ -548,7 +549,8 @@ for (i = 0; i < cnt; i++) { rd_kafka_op_t *rko; rko = rkmessages[i]->_private; - if (rd_kafka_op_version_outdated(rko, version)) { + if (rko->rko_rktp == rktp && + rd_kafka_op_version_outdated(rko, version)) { /* This also destroys the corresponding rkmessage. 
*/ rd_kafka_op_destroy(rko); } else if (i > valid_count) { @@ -620,19 +622,19 @@ mtx_unlock(&rkq->rkq_lock); - if (rd_kafka_op_version_outdated(rko, 0)) { - /* Outdated op, put on discard queue */ - TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); - continue; - } - if (unlikely(rko->rko_type == RD_KAFKA_OP_BARRIER)) { cnt = (unsigned int)rd_kafka_purge_outdated_messages( - rko->rko_version, rkmessages, cnt); + rko->rko_rktp, rko->rko_version, rkmessages, cnt); rd_kafka_op_destroy(rko); continue; } + if (rd_kafka_op_version_outdated(rko, 0)) { + /* Outdated op, put on discard queue */ + TAILQ_INSERT_TAIL(&tmpq, rko, rko_link); + continue; + } + /* Serve non-FETCH callbacks */ res = rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL); diff -Nru librdkafka-1.9.2/src/rdkafka_request.c librdkafka-2.0.2/src/rdkafka_request.c --- librdkafka-1.9.2/src/rdkafka_request.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_request.c 2023-01-20 09:14:36.000000000 +0000 @@ -160,6 +160,10 @@ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED; break; + case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS: + actions |= RD_KAFKA_ERR_ACTION_RETRY; + break; + case RD_KAFKA_RESP_ERR__DESTROY: case RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT: case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE: @@ -941,60 +945,70 @@ rd_kafka_op_destroy(rko); } - - /** - * Send OffsetFetchRequest for toppar. + * Send OffsetFetchRequest for a consumer group id. * * Any partition with a usable offset will be ignored, if all partitions * have usable offsets then no request is sent at all but an empty * reply is enqueued on the replyq. * - * @param require_stable Whether broker should return unstable offsets - * (not yet transaction-committed). + * @param group_id Request offset for this group id. + * @param parts (optional) List of topic partitions to request, + * or NULL to return all topic partitions associated with the + * group. + * @param require_stable_offsets Whether broker should return stable offsets + * (transaction-committed). + * @param timeout Optional timeout to set to the buffer. 
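With the reworked signature the builder is keyed on an explicit group id, and passing parts == NULL asks the broker for every partition the group has committed, presumably for the new admin offset-listing paths. A call-shape sketch of the internal API; `my_offset_fetch_handler` and the surrounding broker-thread context are hypothetical:

/* 1) Fetch committed offsets for a specific partition list: */
rd_kafka_OffsetFetchRequest(rkb, "my-group", parts,
                            rd_true /* require_stable_offsets */,
                            0 /* no extra timeout */,
                            RD_KAFKA_REPLYQ(rk->rk_ops, 0),
                            my_offset_fetch_handler, NULL);

/* 2) Fetch committed offsets for all partitions of the group
 *    (parts = NULL), with an application-supplied timeout: */
rd_kafka_OffsetFetchRequest(rkb, "my-group", NULL,
                            rd_true, 60 * 1000 /* ms */,
                            RD_KAFKA_REPLYQ(rk->rk_ops, 0),
                            my_offset_fetch_handler, NULL);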
*/ void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, + const char *group_id, rd_kafka_topic_partition_list_t *parts, - rd_bool_t require_stable, + rd_bool_t require_stable_offsets, + int timeout, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque) { rd_kafka_buf_t *rkbuf; int16_t ApiVersion; - int PartCnt = 0; + size_t parts_size = 0; + int PartCnt = -1; ApiVersion = rd_kafka_broker_ApiVersion_supported( rkb, RD_KAFKAP_OffsetFetch, 0, 7, NULL); + if (parts) { + parts_size = parts->cnt * 32; + } + rkbuf = rd_kafka_buf_new_flexver_request( rkb, RD_KAFKAP_OffsetFetch, 1, - RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_group_id) + 4 + - (parts->cnt * 32) + 1, - ApiVersion >= 6 /*flexver*/); + /* GroupId + rd_kafka_buf_write_arraycnt_pos + + * Topics + RequireStable */ + 32 + 4 + parts_size + 1, ApiVersion >= 6 /*flexver*/); /* ConsumerGroup */ - rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_group_id); - - /* Sort partitions by topic */ - rd_kafka_topic_partition_list_sort_by_topic(parts); + rd_kafka_buf_write_str(rkbuf, group_id, -1); - /* Write partition list, filtering out partitions with valid offsets */ - PartCnt = rd_kafka_buf_write_topic_partitions( - rkbuf, parts, rd_false /*include invalid offsets*/, - rd_false /*skip valid offsets */, rd_false /*don't write offsets*/, - rd_false /*don't write epoch */, rd_false /*don't write metadata*/); + if (parts) { + /* Sort partitions by topic */ + rd_kafka_topic_partition_list_sort_by_topic(parts); + /* Write partition list, filtering out partitions with valid + * offsets */ + PartCnt = rd_kafka_buf_write_topic_partitions( + rkbuf, parts, rd_false /*include invalid offsets*/, + rd_false /*skip valid offsets */, + rd_false /*don't write offsets*/, + rd_false /*don't write epoch */, + rd_false /*don't write metadata*/); + } else { + rd_kafka_buf_write_arraycnt_pos(rkbuf); + } if (ApiVersion >= 7) { /* RequireStable */ - rd_kafka_buf_write_i8(rkbuf, require_stable); + rd_kafka_buf_write_i8(rkbuf, require_stable_offsets); } - rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); - - rd_rkb_dbg(rkb, TOPIC, "OFFSET", - "OffsetFetchRequest(v%d) for %d/%d partition(s)", ApiVersion, - PartCnt, parts->cnt); - if (PartCnt == 0) { /* No partitions needs OffsetFetch, enqueue empty * response right away. 
*/ @@ -1005,12 +1019,36 @@ return; } + if (timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms) + rd_kafka_buf_set_abs_timeout(rkbuf, timeout + 1000, 0); + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); + + if (parts) { + rd_rkb_dbg( + rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER, + "OFFSET", + "Group %s OffsetFetchRequest(v%d) for %d/%d partition(s)", + group_id, ApiVersion, PartCnt, parts->cnt); + } else { + rd_rkb_dbg( + rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER, + "OFFSET", + "Group %s OffsetFetchRequest(v%d) for all partitions", + group_id, ApiVersion); + } + /* Let handler decide if retries should be performed */ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES; - rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET", - "Fetch committed offsets for %d/%d partition(s)", PartCnt, - parts->cnt); + if (parts) { + rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET", + "Fetch committed offsets for %d/%d partition(s)", + PartCnt, parts->cnt); + } else { + rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET", + "Fetch committed offsets all the partitions"); + } rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); } @@ -1100,7 +1138,8 @@ rd_kafka_resp_err_t err, rd_kafka_buf_t *rkbuf, rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets) { + rd_kafka_topic_partition_list_t *offsets, + rd_bool_t ignore_cgrp) { const int log_decode_errors = LOG_ERR; int32_t TopicArrayCnt; int errcnt = 0; @@ -1111,7 +1150,7 @@ if (err) goto err; - if (request->rkbuf_reqhdr.ApiVersion >= 3) + if (rd_kafka_buf_ApiVersion(rkbuf) >= 3) rd_kafka_buf_read_throttle_time(rkbuf); rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt); @@ -1179,13 +1218,14 @@ RD_KAFKA_ERR_ACTION_END); - if (actions & RD_KAFKA_ERR_ACTION_FATAL) { + if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_FATAL)) { rd_kafka_set_fatal_error(rk, err, "OffsetCommit failed: %s", rd_kafka_err2str(err)); return err; } - if (actions & RD_KAFKA_ERR_ACTION_REFRESH && rk->rk_cgrp) { + if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_REFRESH) && + rk->rk_cgrp) { /* Mark coordinator dead or re-query for coordinator. * ..dead() will trigger a re-query. */ if (actions & RD_KAFKA_ERR_ACTION_SPECIAL) @@ -1196,7 +1236,7 @@ "OffsetCommitRequest failed"); } - if (actions & RD_KAFKA_ERR_ACTION_RETRY && + if (!ignore_cgrp && actions & RD_KAFKA_ERR_ACTION_RETRY && !(actions & RD_KAFKA_ERR_ACTION_PERMANENT) && rd_kafka_buf_retry(rkb, request)) return RD_KAFKA_RESP_ERR__IN_PROGRESS; @@ -1205,16 +1245,18 @@ return err; } - - /** * @brief Send OffsetCommitRequest for a list of partitions. * + * @param cgmetadata consumer group metadata. + * + * @param offsets - offsets to commit for each topic-partition. + * * @returns 0 if none of the partitions in \p offsets had valid offsets, * else 1. 
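rd_kafka_OffsetCommitRequest() now takes an rd_kafka_consumer_group_metadata_t rather than the consumer group handle, the same object the public transactional API consumes. An application-level sketch of obtaining that metadata from a consumer and handing it to a transactional producer; topic, partition and offset values are placeholders and error handling is abbreviated:

/* Assumes an open consumer `consumer` and a transactional producer
 * `producer` (illustrative fragment). */
rd_kafka_topic_partition_list_t *offsets =
    rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(offsets, "my-topic", 0)->offset = 42;

rd_kafka_consumer_group_metadata_t *cgmd =
    rd_kafka_consumer_group_metadata(consumer);

rd_kafka_error_t *error =
    rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd,
                                         10 * 1000 /* timeout ms */);
if (error) {
        fprintf(stderr, "send_offsets_to_transaction: %s\n",
                rd_kafka_error_string(error));
        rd_kafka_error_destroy(error);
}

rd_kafka_consumer_group_metadata_destroy(cgmd);
rd_kafka_topic_partition_list_destroy(offsets);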
*/ int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, - rd_kafka_cgrp_t *rkcg, + rd_kafka_consumer_group_metadata_t *cgmetadata, rd_kafka_topic_partition_list_t *offsets, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, @@ -1240,19 +1282,20 @@ 100 + (offsets->cnt * 128)); /* ConsumerGroup */ - rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_group_id); + rd_kafka_buf_write_str(rkbuf, cgmetadata->group_id, -1); /* v1,v2 */ if (ApiVersion >= 1) { /* ConsumerGroupGenerationId */ - rd_kafka_buf_write_i32(rkbuf, rkcg->rkcg_generation_id); + rd_kafka_buf_write_i32(rkbuf, cgmetadata->generation_id); /* ConsumerId */ - rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_member_id); + rd_kafka_buf_write_str(rkbuf, cgmetadata->member_id, -1); } /* v7: GroupInstanceId */ if (ApiVersion >= 7) - rd_kafka_buf_write_kstr(rkbuf, rkcg->rkcg_group_instance_id); + rd_kafka_buf_write_str(rkbuf, cgmetadata->group_instance_id, + -1); /* v2-4: RetentionTime */ if (ApiVersion >= 2 && ApiVersion <= 4) @@ -1340,7 +1383,6 @@ return 1; } - /** * @brief Construct and send OffsetDeleteRequest to \p rkb * with the partitions in del_grpoffsets (DeleteConsumerGroupOffsets_t*) @@ -1726,43 +1768,133 @@ /** - * Send ListGroupsRequest + * @brief Construct and send ListGroupsRequest to \p rkb + * with the states (const char *) in \p states. + * Uses \p max_ApiVersion as maximum API version, + * pass -1 to use the maximum available version. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. */ -void rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb, + int16_t max_ApiVersion, + const char **states, + size_t states_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + size_t i; + rd_bool_t is_flexver = rd_false; + + if (max_ApiVersion < 0) + max_ApiVersion = 4; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_ListGroups, 0, 0); + if (max_ApiVersion > ApiVersion) { + /* Remark: don't check if max_ApiVersion is zero. + * As rd_kafka_broker_ApiVersion_supported cannot be checked + * in the application thread reliably . */ + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_ListGroups, 0, max_ApiVersion, NULL); + is_flexver = ApiVersion >= 3; + } + if (ApiVersion == -1) { + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "ListGroupsRequest not supported by broker"); + } + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_ListGroups, 1, + /* rd_kafka_buf_write_arraycnt_pos + tags + StatesFilter */ + 4 + 1 + 32 * states_cnt, is_flexver); + + if (ApiVersion >= 4) { + size_t of_GroupsArrayCnt = + rd_kafka_buf_write_arraycnt_pos(rkbuf); + for (i = 0; i < states_cnt; i++) { + rd_kafka_buf_write_str(rkbuf, states[i], -1); + } + rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, i); + } + if (is_flexver) { + rd_kafka_buf_write_tags(rkbuf); + } + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + return NULL; } - /** - * Send DescribeGroupsRequest + * @brief Construct and send DescribeGroupsRequest to \p rkb + * with the groups (const char *) in \p groups. 
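The states array written for ListGroups v4 is what the consumer-group state filter on the admin options ends up feeding (see the match_consumer_group_states option destroyed in rdkafka_op.c above). A hedged sketch of driving it from the public Admin API; the option, enum and result-event names are assumed to be the ones added alongside these ops:

/* List only groups currently in the Stable or Empty state.
 * Assumes an initialized client `rk` and a result queue `rkqu`. */
rd_kafka_consumer_group_state_t states[] = {
    RD_KAFKA_CONSUMER_GROUP_STATE_STABLE,
    RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY,
};
rd_kafka_AdminOptions_t *options =
    rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
rd_kafka_error_t *error =
    rd_kafka_AdminOptions_set_match_consumer_group_states(options, states, 2);
if (error) {
        fprintf(stderr, "states filter: %s\n", rd_kafka_error_string(error));
        rd_kafka_error_destroy(error);
}

rd_kafka_ListConsumerGroups(rk, options, rkqu);
/* ... then poll rkqu for the ListConsumerGroups result event ... */
rd_kafka_AdminOptions_destroy(options);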
+ * Uses \p max_ApiVersion as maximum API version, + * pass -1 to use the maximum available version. + * + * The response (unparsed) will be enqueued on \p replyq + * for handling by \p resp_cb (with \p opaque passed). + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. */ -void rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb, - const char **groups, - int group_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque) { +rd_kafka_error_t *rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb, + int16_t max_ApiVersion, + char **groups, + size_t group_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + size_t of_GroupsArrayCnt; - rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeGroups, 1, - 32 * group_cnt); + if (max_ApiVersion < 0) + max_ApiVersion = 4; + + if (max_ApiVersion > ApiVersion) { + /* Remark: don't check if max_ApiVersion is zero. + * As rd_kafka_broker_ApiVersion_supported cannot be checked + * in the application thread reliably . */ + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_DescribeGroups, 0, max_ApiVersion, NULL); + } + + if (ApiVersion == -1) { + return rd_kafka_error_new( + RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, + "DescribeGroupsRequest not supported by broker"); + } - rd_kafka_buf_write_i32(rkbuf, group_cnt); + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_DescribeGroups, 1, + 4 /* rd_kafka_buf_write_arraycnt_pos */ + + 1 /* IncludeAuthorizedOperations */ + 1 /* tags */ + + 32 * group_cnt /* Groups */, + rd_false); + + /* write Groups */ + of_GroupsArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf); + rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, group_cnt); while (group_cnt-- > 0) rd_kafka_buf_write_str(rkbuf, groups[group_cnt], -1); + /* write IncludeAuthorizedOperations */ + if (ApiVersion >= 3) { + /* TODO: implement KIP-430 */ + rd_kafka_buf_write_bool(rkbuf, rd_false); + } + + rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0); rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + return NULL; } - - /** * @brief Generic handler for Metadata responses * diff -Nru librdkafka-1.9.2/src/rdkafka_request.h librdkafka-2.0.2/src/rdkafka_request.h --- librdkafka-1.9.2/src/rdkafka_request.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_request.h 2023-01-20 09:14:36.000000000 +0000 @@ -116,23 +116,25 @@ void *opaque); void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb, + const char *group_id, rd_kafka_topic_partition_list_t *parts, - rd_bool_t require_stable, + rd_bool_t require_stable_offsets, + int timeout, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, void *opaque); - - rd_kafka_resp_err_t rd_kafka_handle_OffsetCommit(rd_kafka_t *rk, rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err, rd_kafka_buf_t *rkbuf, rd_kafka_buf_t *request, - rd_kafka_topic_partition_list_t *offsets); + rd_kafka_topic_partition_list_t *offsets, + rd_bool_t ignore_cgrp); + int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb, - rd_kafka_cgrp_t *rkcg, + rd_kafka_consumer_group_metadata_t *cgmetadata, rd_kafka_topic_partition_list_t *offsets, rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb, @@ -192,17 +194,21 @@ rd_kafka_buf_t *request, void *opaque); -void rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); - -void 
rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb, - const char **groups, - int group_cnt, - rd_kafka_replyq_t replyq, - rd_kafka_resp_cb_t *resp_cb, - void *opaque); +rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb, + int16_t max_ApiVersion, + const char **states, + size_t states_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_error_t *rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb, + int16_t max_ApiVersion, + char **groups, + size_t group_cnt, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb, diff -Nru librdkafka-1.9.2/src/rdkafka_sasl.c librdkafka-2.0.2/src/rdkafka_sasl.c --- librdkafka-1.9.2/src/rdkafka_sasl.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_sasl.c 2023-01-20 09:14:36.000000000 +0000 @@ -488,3 +488,35 @@ return 0; #endif } + +/** + * Sets or resets the SASL (PLAIN or SCRAM) credentials used by this + * client when making new connections to brokers. + * + * @returns NULL on success or an error object on error. + */ +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, + const char *username, + const char *password) { + + if (!username || !password) + return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG, + "Username and password are required"); + + mtx_lock(&rk->rk_conf.sasl.lock); + + if (rk->rk_conf.sasl.username) + rd_free(rk->rk_conf.sasl.username); + rk->rk_conf.sasl.username = rd_strdup(username); + + if (rk->rk_conf.sasl.password) + rd_free(rk->rk_conf.sasl.password); + rk->rk_conf.sasl.password = rd_strdup(password); + + mtx_unlock(&rk->rk_conf.sasl.lock); + + rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT, + "SASL credentials updated"); + + return NULL; +} diff -Nru librdkafka-1.9.2/src/rdkafka_sasl_cyrus.c librdkafka-2.0.2/src/rdkafka_sasl_cyrus.c --- librdkafka-1.9.2/src/rdkafka_sasl_cyrus.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_sasl_cyrus.c 2023-01-20 09:14:36.000000000 +0000 @@ -91,8 +91,10 @@ const char *out; unsigned int outlen; + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); r = sasl_client_step(state->conn, size > 0 ? buf : NULL, size, &interact, &out, &outlen); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (r >= 0) { /* Note: outlen may be 0 here for an empty response */ @@ -148,9 +150,11 @@ RD_KAFKA_DBG_SECURITY) { const char *user, *mech, *authsrc; + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (sasl_getprop(state->conn, SASL_USERNAME, (const void **)&user) != SASL_OK) user = "(unknown)"; + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (sasl_getprop(state->conn, SASL_MECHNAME, (const void **)&mech) != SASL_OK) @@ -356,6 +360,12 @@ switch (id) { case SASL_CB_USER: case SASL_CB_AUTHNAME: + /* Since cyrus expects the returned pointer to be stable + * and not have its content changed, but the username + * and password may be updated at anytime by the application + * calling sasl_set_credentials(), we need to lock + * rk_conf.sasl.lock before each call into cyrus-sasl. + * So when we get here the lock is already held. 
*/ *result = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.username; break; @@ -381,6 +391,7 @@ rd_kafka_transport_t *rktrans = context; const char *password; + /* rk_conf.sasl.lock is already locked */ password = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.password; if (!password) { @@ -472,8 +483,11 @@ if (!state) return; - if (state->conn) + if (state->conn) { + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); sasl_dispose(&state->conn); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); + } rd_free(state); } @@ -528,9 +542,11 @@ memcpy(state->callbacks, callbacks, sizeof(callbacks)); + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname, NULL, NULL, /* no local & remote IP checks */ state->callbacks, 0, &state->conn); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (r != SASL_OK) { rd_snprintf(errstr, errstr_size, "%s", sasl_errstring(r, NULL, NULL)); @@ -550,8 +566,10 @@ unsigned int outlen; const char *mech = NULL; + mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); r = sasl_client_start(state->conn, rk->rk_conf.sasl.mechanisms, NULL, &out, &outlen, &mech); + mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock); if (r >= 0) if (rd_kafka_sasl_send(rktrans, out, outlen, errstr, diff -Nru librdkafka-1.9.2/src/rdkafka_sasl_oauthbearer_oidc.c librdkafka-2.0.2/src/rdkafka_sasl_oauthbearer_oidc.c --- librdkafka-1.9.2/src/rdkafka_sasl_oauthbearer_oidc.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_sasl_oauthbearer_oidc.c 2023-01-20 09:14:36.000000000 +0000 @@ -552,6 +552,8 @@ " received post_fields is %s", expected_post_fields, post_fields); + rd_free(post_fields); + RD_UT_PASS(); } @@ -582,6 +584,8 @@ " received post_fields is %s", expected_post_fields, post_fields); + rd_free(post_fields); + RD_UT_PASS(); } diff -Nru librdkafka-1.9.2/src/rdkafka_sasl_plain.c librdkafka-2.0.2/src/rdkafka_sasl_plain.c --- librdkafka-1.9.2/src/rdkafka_sasl_plain.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_sasl_plain.c 2023-01-20 09:14:36.000000000 +0000 @@ -74,13 +74,16 @@ char *buf; int of = 0; int zidlen = 0; - int cidlen = rk->rk_conf.sasl.username - ? (int)strlen(rk->rk_conf.sasl.username) - : 0; - int pwlen = rk->rk_conf.sasl.password - ? (int)strlen(rk->rk_conf.sasl.password) - : 0; + int cidlen, pwlen; + mtx_lock(&rk->rk_conf.sasl.lock); + + cidlen = rk->rk_conf.sasl.username + ? (int)strlen(rk->rk_conf.sasl.username) + : 0; + pwlen = rk->rk_conf.sasl.password + ? 
(int)strlen(rk->rk_conf.sasl.password) + : 0; buf = rd_alloca(zidlen + 1 + cidlen + 1 + pwlen + 1); @@ -95,6 +98,7 @@ /* passwd */ memcpy(&buf[of], rk->rk_conf.sasl.password, pwlen); of += pwlen; + mtx_unlock(&rk->rk_conf.sasl.lock); rd_rkb_dbg(rkb, SECURITY, "SASLPLAIN", "Sending SASL PLAIN (builtin) authentication token"); @@ -115,7 +119,13 @@ static int rd_kafka_sasl_plain_conf_validate(rd_kafka_t *rk, char *errstr, size_t errstr_size) { - if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) { + rd_bool_t both_set; + + mtx_lock(&rk->rk_conf.sasl.lock); + both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password; + mtx_unlock(&rk->rk_conf.sasl.lock); + + if (!both_set) { rd_snprintf(errstr, errstr_size, "sasl.username and sasl.password must be set"); return -1; diff -Nru librdkafka-1.9.2/src/rdkafka_sasl_scram.c librdkafka-2.0.2/src/rdkafka_sasl_scram.c --- librdkafka-1.9.2/src/rdkafka_sasl_scram.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_sasl_scram.c 2023-01-20 09:14:36.000000000 +0000 @@ -397,9 +397,8 @@ int itcnt, rd_chariov_t *out) { struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; - const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; - rd_chariov_t SaslPassword = {.ptr = conf->sasl.password, - .size = strlen(conf->sasl.password)}; + rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; + rd_chariov_t SaslPassword = RD_ZERO_INIT; rd_chariov_t SaltedPassword = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; rd_chariov_t ClientKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; rd_chariov_t ServerKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)}; @@ -416,6 +415,11 @@ char *ClientProofB64; int i; + mtx_lock(&conf->sasl.lock); + rd_strdupa(&SaslPassword.ptr, conf->sasl.password); + mtx_unlock(&conf->sasl.lock); + SaslPassword.size = strlen(SaslPassword.ptr); + /* Constructing the ClientProof attribute (p): * * p = Base64-encoded ClientProof @@ -664,7 +668,7 @@ } else if ((attr_v = rd_kafka_sasl_scram_get_attr( in, 'v', "verifier in server-final-message", errstr, errstr_size))) { - const rd_kafka_conf_t *conf; + rd_kafka_conf_t *conf; /* Authentication succesful on server, * but we need to verify the ServerSignature too. 
*/ @@ -686,9 +690,11 @@ conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; + mtx_lock(&conf->sasl.lock); rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER, "SCRAMAUTH", "Authenticated as %s using %s", conf->sasl.username, conf->sasl.mechanisms); + mtx_unlock(&conf->sasl.lock); rd_kafka_sasl_auth_done(rktrans); return 0; @@ -711,11 +717,13 @@ rd_chariov_t *out) { char *sasl_username; struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state; - const rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; + rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf; rd_kafka_sasl_scram_generate_nonce(&state->cnonce); + mtx_lock(&conf->sasl.lock); sasl_username = rd_kafka_sasl_safe_string(conf->sasl.username); + mtx_unlock(&conf->sasl.lock); out->size = strlen("n,,n=,r=") + strlen(sasl_username) + state->cnonce.size; @@ -842,8 +850,13 @@ char *errstr, size_t errstr_size) { const char *mech = rk->rk_conf.sasl.mechanisms; + rd_bool_t both_set; + + mtx_lock(&rk->rk_conf.sasl.lock); + both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password; + mtx_unlock(&rk->rk_conf.sasl.lock); - if (!rk->rk_conf.sasl.username || !rk->rk_conf.sasl.password) { + if (!both_set) { rd_snprintf(errstr, errstr_size, "sasl.username and sasl.password must be set"); return -1; diff -Nru librdkafka-1.9.2/src/rdkafka_ssl.c librdkafka-2.0.2/src/rdkafka_ssl.c --- librdkafka-1.9.2/src/rdkafka_ssl.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_ssl.c 2023-01-20 09:14:36.000000000 +0000 @@ -46,6 +46,10 @@ #include #include +#if OPENSSL_VERSION_NUMBER >= 0x30000000 +#include +#endif + #include #if !_WIN32 @@ -102,16 +106,21 @@ const char *rd_kafka_ssl_last_error_str(void) { static RD_TLS char errstr[256]; unsigned long l; - const char *file, *data; + const char *file, *data, *func; int line, flags; - l = ERR_peek_last_error_line_data(&file, &line, &data, &flags); +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + l = ERR_peek_last_error_all(&file, &line, &func, &data, &flags); +#else + l = ERR_peek_last_error_line_data(&file, &line, &data, &flags); + func = ERR_func_error_string(l); +#endif + if (!l) return ""; rd_snprintf(errstr, sizeof(errstr), "%lu:%s:%s:%s:%d: %s", l, - ERR_lib_error_string(l), ERR_func_error_string(l), file, - line, + ERR_lib_error_string(l), func, file, line, ((flags & ERR_TXT_STRING) && data && *data) ? data : ERR_reason_error_string(l)); @@ -131,7 +140,7 @@ char *errstr, size_t errstr_size) { unsigned long l; - const char *file, *data; + const char *file, *data, *func; int line, flags; int cnt = 0; @@ -140,10 +149,19 @@ rk = rkb->rkb_rk; } - while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) != - 0) { + while ( +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + (l = ERR_get_error_all(&file, &line, &func, &data, &flags)) +#else + (l = ERR_get_error_line_data(&file, &line, &data, &flags)) +#endif + ) { char buf[256]; +#if OPENSSL_VERSION_NUMBER < 0x30000000 + func = ERR_func_error_string(l); +#endif + if (cnt++ > 0) { /* Log last message */ if (rkb) @@ -157,10 +175,10 @@ if (!(flags & ERR_TXT_STRING) || !data || !*data) data = NULL; - /* Include openssl file:line if debugging is enabled */ + /* Include openssl file:line:func if debugging is enabled */ if (rk->rk_conf.log_level >= LOG_DEBUG) - rd_snprintf(errstr, errstr_size, "%s:%d: %s%s%s", file, - line, buf, data ? ": " : "", + rd_snprintf(errstr, errstr_size, "%s:%d:%s %s%s%s", + file, line, func, buf, data ? ": " : "", data ? 
data : ""); else rd_snprintf(errstr, errstr_size, "%s%s%s", buf, @@ -448,7 +466,7 @@ RD_KAFKA_SSL_ENDPOINT_ID_NONE) return 0; -#if OPENSSL_VERSION_NUMBER >= 0x10100000 +#if OPENSSL_VERSION_NUMBER >= 0x10100000 && !defined(OPENSSL_IS_BORINGSSL) if (!SSL_set1_host(rktrans->rktrans_ssl, name)) goto fail; #elif OPENSSL_VERSION_NUMBER >= 0x1000200fL /* 1.0.2 */ @@ -557,7 +575,11 @@ if (!rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.enable_verify) return 0; +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + cert = SSL_get1_peer_certificate(rktrans->rktrans_ssl); +#else cert = SSL_get_peer_certificate(rktrans->rktrans_ssl); +#endif X509_free(cert); if (!cert) { rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR, @@ -614,6 +636,7 @@ else if (strstr(errstr, "tls_process_server_certificate:" "certificate verify failed") || + strstr(errstr, "error:0A000086") /*openssl3*/ || strstr(errstr, "get_server_certificate:" "certificate verify failed")) @@ -996,32 +1019,55 @@ /* CA as PEM string */ X509 *x509; X509_STORE *store; + BIO *bio; + int cnt = 0; /* Get the OpenSSL trust store */ store = SSL_CTX_get_cert_store(ctx); rd_assert(store != NULL); rd_kafka_dbg(rk, SECURITY, "SSL", - "Loading CA certificate from string"); + "Loading CA certificate(s) from string"); - x509 = rd_kafka_ssl_X509_from_string( - rk, rk->rk_conf.ssl.ca_pem); - if (!x509) { - rd_snprintf(errstr, errstr_size, - "ssl.ca.pem failed: " - "not in PEM format?: "); - return -1; + bio = + BIO_new_mem_buf((void *)rk->rk_conf.ssl.ca_pem, -1); + rd_assert(bio != NULL); + + /* Add all certificates to cert store */ + while ((x509 = PEM_read_bio_X509( + bio, NULL, rd_kafka_transport_ssl_passwd_cb, + rk))) { + if (!X509_STORE_add_cert(store, x509)) { + rd_snprintf(errstr, errstr_size, + "failed to add ssl.ca.pem " + "certificate " + "#%d to CA cert store: ", + cnt); + X509_free(x509); + BIO_free(bio); + return -1; + } + + X509_free(x509); + cnt++; } - if (!X509_STORE_add_cert(store, x509)) { + if (!BIO_eof(bio) || !cnt) { rd_snprintf(errstr, errstr_size, - "failed to add ssl.ca.pem to " - "CA cert store: "); - X509_free(x509); + "failed to read certificate #%d " + "from ssl.ca.pem: " + "not in PEM format?: ", + cnt); + BIO_free(bio); return -1; } - X509_free(x509); + BIO_free(bio); + + rd_kafka_dbg(rk, SECURITY, "SSL", + "Loaded %d CA certificate(s) from string", + cnt); + ca_probe = rd_false; } @@ -1246,7 +1292,7 @@ "Loading client's keystore file from %s", rk->rk_conf.ssl.keystore_location); - bio = BIO_new_file(rk->rk_conf.ssl.keystore_location, "r"); + bio = BIO_new_file(rk->rk_conf.ssl.keystore_location, "rb"); if (!bio) { rd_snprintf(errstr, errstr_size, "Failed to open ssl.keystore.location: " @@ -1310,7 +1356,7 @@ check_pkey = rd_true; } -#if OPENSSL_VERSION_NUMBER >= 0x10100000 +#if WITH_SSL_ENGINE /* * If applicable, use OpenSSL engine to fetch SSL certificate. */ @@ -1380,7 +1426,7 @@ check_pkey = rd_true; } -#endif +#endif /*WITH_SSL_ENGINE*/ /* Check that a valid private/public key combo was set. */ if (check_pkey && SSL_CTX_check_private_key(ctx) != 1) { @@ -1403,13 +1449,13 @@ SSL_CTX_free(rk->rk_conf.ssl.ctx); rk->rk_conf.ssl.ctx = NULL; -#if OPENSSL_VERSION_NUMBER >= 0x10100000 +#if WITH_SSL_ENGINE RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free); #endif } -#if OPENSSL_VERSION_NUMBER >= 0x10100000 +#if WITH_SSL_ENGINE /** * @brief Initialize and load OpenSSL engine, if configured. 
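The rewritten loader above now walks the whole ssl.ca.pem string and adds every PEM certificate it finds to the trust store, and the hunk that follows loads the OpenSSL 3.0.x providers named in the ssl.providers property. A configuration sketch; the certificate contents are placeholders:

char errstr[256];
rd_kafka_conf_t *conf = rd_kafka_conf_new();

/* ssl.ca.pem may contain several concatenated certificates,
 * e.g. an intermediate CA followed by the root CA. */
const char *ca_pem =
    "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
    "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n";

if (rd_kafka_conf_set(conf, "security.protocol", "ssl",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
    rd_kafka_conf_set(conf, "ssl.ca.pem", ca_pem,
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
    /* Comma-separated OpenSSL 3.0.x providers; the legacy provider is
     * only needed for obsolete ciphers. */
    rd_kafka_conf_set(conf, "ssl.providers", "default,legacy",
                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
        fprintf(stderr, "conf failed: %s\n", errstr);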
* @@ -1475,6 +1521,83 @@ #endif +#if OPENSSL_VERSION_NUMBER >= 0x30000000 +/** + * @brief Wrapper around OSSL_PROVIDER_unload() to expose a free(void*) API + * suitable for rd_list_t's free_cb. + */ +static void rd_kafka_ssl_OSSL_PROVIDER_free(void *ptr) { + OSSL_PROVIDER *prov = ptr; + (void)OSSL_PROVIDER_unload(prov); +} + + +/** + * @brief Load OpenSSL 3.0.x providers specified in comma-separated string. + * + * @remark Only the error preamble/prefix is written here, the actual + * OpenSSL error is retrieved from the OpenSSL error stack by + * the caller. + * + * @returns rd_false on failure (errstr will be written to), or rd_true + * on successs. + */ +static rd_bool_t rd_kafka_ssl_ctx_load_providers(rd_kafka_t *rk, + const char *providers_csv, + char *errstr, + size_t errstr_size) { + size_t provider_cnt, i; + char **providers = rd_string_split( + providers_csv, ',', rd_true /*skip empty*/, &provider_cnt); + + + if (!providers || !provider_cnt) { + rd_snprintf(errstr, errstr_size, + "ssl.providers expects a comma-separated " + "list of OpenSSL 3.0.x providers"); + if (providers) + rd_free(providers); + return rd_false; + } + + rd_list_init(&rk->rk_conf.ssl.loaded_providers, (int)provider_cnt, + rd_kafka_ssl_OSSL_PROVIDER_free); + + for (i = 0; i < provider_cnt; i++) { + const char *provider = providers[i]; + OSSL_PROVIDER *prov; + const char *buildinfo = NULL; + OSSL_PARAM request[] = {{"buildinfo", OSSL_PARAM_UTF8_PTR, + (void *)&buildinfo, 0, 0}, + {NULL, 0, NULL, 0, 0}}; + + prov = OSSL_PROVIDER_load(NULL, provider); + if (!prov) { + rd_snprintf(errstr, errstr_size, + "Failed to load OpenSSL provider \"%s\": ", + provider); + rd_free(providers); + return rd_false; + } + + if (!OSSL_PROVIDER_get_params(prov, request)) + buildinfo = "no buildinfo"; + + rd_kafka_dbg(rk, SECURITY, "SSL", + "OpenSSL provider \"%s\" loaded (%s)", provider, + buildinfo); + + rd_list_add(&rk->rk_conf.ssl.loaded_providers, prov); + } + + rd_free(providers); + + return rd_true; +} +#endif + + + /** * @brief Once per rd_kafka_t handle initialization of OpenSSL * @@ -1508,7 +1631,14 @@ if (errstr_size > 0) errstr[0] = '\0'; -#if OPENSSL_VERSION_NUMBER >= 0x10100000 +#if OPENSSL_VERSION_NUMBER >= 0x30000000 + if (rk->rk_conf.ssl.providers && + !rd_kafka_ssl_ctx_load_providers(rk, rk->rk_conf.ssl.providers, + errstr, errstr_size)) + goto fail; +#endif + +#if WITH_SSL_ENGINE if (rk->rk_conf.ssl.engine_location && !rk->rk_conf.ssl.engine) { rd_kafka_dbg(rk, SECURITY, "SSL", "Loading OpenSSL engine from \"%s\"", @@ -1600,12 +1730,18 @@ fail: r = (int)strlen(errstr); - rd_kafka_ssl_error(rk, NULL, errstr + r, - (int)errstr_size > r ? (int)errstr_size - r : 0); + /* If only the error preamble is provided in errstr and ending with + * "....: ", then retrieve the last error from the OpenSSL error stack, + * else treat the errstr as complete. */ + if (r > 2 && !strcmp(&errstr[r - 2], ": ")) + rd_kafka_ssl_error(rk, NULL, errstr + r, + (int)errstr_size > r ? 
(int)errstr_size - r + : 0); RD_IF_FREE(ctx, SSL_CTX_free); -#if OPENSSL_VERSION_NUMBER >= 0x10100000 +#if WITH_SSL_ENGINE RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free); #endif + rd_list_destroy(&rk->rk_conf.ssl.loaded_providers); return -1; } diff -Nru librdkafka-1.9.2/src/rdkafka_sticky_assignor.c librdkafka-2.0.2/src/rdkafka_sticky_assignor.c --- librdkafka-1.9.2/src/rdkafka_sticky_assignor.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_sticky_assignor.c 2023-01-20 09:14:36.000000000 +0000 @@ -1258,7 +1258,7 @@ /* for each eligible (subscribed and available) topic (\p atopic): * for each member subscribing to that topic: * and for each partition of that topic: - * add conusmer and partition to: + * add consumer and partition to: * partition2AllPotentialConsumers * consumer2AllPotentialPartitions */ diff -Nru librdkafka-1.9.2/src/rdkafka_timer.c librdkafka-2.0.2/src/rdkafka_timer.c --- librdkafka-1.9.2/src/rdkafka_timer.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_timer.c 2023-01-20 09:14:36.000000000 +0000 @@ -321,12 +321,17 @@ while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) && rtmr->rtmr_next <= now) { + rd_bool_t oneshot; rd_kafka_timer_unschedule(rkts, rtmr); /* If timer must only be fired once, - * disable it now prior to callback. */ - if (rtmr->rtmr_oneshot) + * disable it now prior to callback. + * + * NOTE: Oneshot timers are never touched again after + * the callback has been called to avoid use-after-free. + */ + if ((oneshot = rtmr->rtmr_oneshot)) rtmr->rtmr_interval = 0; rd_kafka_timers_unlock(rkts); @@ -337,7 +342,7 @@ /* Restart timer, unless it has been stopped, or * already reschedueld (start()ed) from callback. */ - if (rd_kafka_timer_started(rtmr) && + if (!oneshot && rd_kafka_timer_started(rtmr) && !rd_kafka_timer_scheduled(rtmr)) rd_kafka_timer_schedule(rkts, rtmr, 0); } diff -Nru librdkafka-1.9.2/src/rdkafka_txnmgr.c librdkafka-2.0.2/src/rdkafka_txnmgr.c --- librdkafka-1.9.2/src/rdkafka_txnmgr.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdkafka_txnmgr.c 2023-01-20 09:14:36.000000000 +0000 @@ -43,10 +43,18 @@ #include "rdrand.h" -static void rd_kafka_txn_curr_api_reply_error(rd_kafka_q_t *rkq, - rd_kafka_error_t *error); static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms); +#define rd_kafka_txn_curr_api_set_result(rk, actions, error) \ + rd_kafka_txn_curr_api_set_result0(__FUNCTION__, __LINE__, rk, actions, \ + error) +static void rd_kafka_txn_curr_api_set_result0(const char *func, + int line, + rd_kafka_t *rk, + int actions, + rd_kafka_error_t *error); + + /** * @return a normalized error code, this for instance abstracts different @@ -58,6 +66,8 @@ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: case RD_KAFKA_RESP_ERR_PRODUCER_FENCED: return RD_KAFKA_RESP_ERR__FENCED; + case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE: + return RD_KAFKA_RESP_ERR__TIMED_OUT; default: return err; } @@ -94,7 +104,7 @@ * * @param the required states, ended by a -1 sentinel. 
* - * @locks rd_kafka_*lock(rk) MUST be held + * @locks_required rd_kafka_*lock(rk) MUST be held * @locality any */ static RD_INLINE rd_kafka_error_t * @@ -176,15 +186,21 @@ return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT || curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION; - case RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION: + case RD_KAFKA_TXN_STATE_BEGIN_ABORT: return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION || + curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION || curr == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR; + case RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION: + return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT; + case RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED: - return curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION; + return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT || + curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION; case RD_KAFKA_TXN_STATE_ABORTABLE_ERROR: - if (curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION || + if (curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT || + curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION || curr == RD_KAFKA_TXN_STATE_FATAL_ERROR) { /* Ignore sub-sequent abortable errors in * these states. */ @@ -216,7 +232,7 @@ * was invalid. * * @locality rdkafka main thread - * @locks rd_kafka_wrlock MUST be held + * @locks_required rd_kafka_wrlock MUST be held */ static void rd_kafka_txn_set_state(rd_kafka_t *rk, rd_kafka_txn_state_t new_state) { @@ -258,6 +274,25 @@ /** + * @returns the current transaction timeout, i.e., the time remaining in + * the current transaction. + * + * @remark The remaining timeout is currently not tracked, so this function + * will always return the remaining time based on transaction.timeout.ms + * and we rely on the broker to enforce the actual remaining timeout. + * This is still better than not having a timeout cap at all, which + * used to be the case. + * It's also tricky knowing exactly what the controller thinks the + * remaining transaction time is. + * + * @locks_required rd_kafka_*lock(rk) MUST be held. + */ +static RD_INLINE rd_ts_t rd_kafka_txn_current_timeout(const rd_kafka_t *rk) { + return rd_timeout_init(rk->rk_conf.eos.transaction_timeout_ms); +} + + +/** * @brief An unrecoverable transactional error has occurred. * * @param do_lock RD_DO_LOCK: rd_kafka_wrlock(rk) will be acquired and released, @@ -290,19 +325,16 @@ rd_free(rk->rk_eos.txn_errstr); rk->rk_eos.txn_errstr = rd_strdup(errstr); - if (rk->rk_eos.txn_init_rkq) { - /* If application has called init_transactions() and - * it has now failed, reply to the app. */ - rd_kafka_txn_curr_api_reply_error( - rk->rk_eos.txn_init_rkq, - rd_kafka_error_new_fatal(err, "%s", errstr)); - rk->rk_eos.txn_init_rkq = NULL; - } - rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR); if (do_lock) rd_kafka_wrunlock(rk); + + /* If application has called a transactional API and + * it has now failed, reply to the app. + * If there is no currently called API then this is a no-op. */ + rd_kafka_txn_curr_api_set_result( + rk, 0, rd_kafka_error_new_fatal(err, "%s", errstr)); } @@ -374,73 +406,292 @@ /** - * @brief Send op reply to the application which is blocking - * on one of the transaction APIs and reset the current API. + * @brief Send request-reply op to txnmgr callback, waits for a reply + * or timeout, and returns an error object or NULL on success. * - * @param rkq is the queue to send the reply on, which may be NULL or disabled. - * The \p rkq refcount is decreased by this function. - * @param error Optional error object, or NULL. + * @remark Does not alter the current API state. 
* - * @locality rdkafka main thread - * @locks any + * @returns an error object on failure, else NULL. + * + * @locality application thread + * + * @locks_acquired rk->rk_eos.txn_curr_api.lock */ -static void rd_kafka_txn_curr_api_reply_error(rd_kafka_q_t *rkq, - rd_kafka_error_t *error) { - rd_kafka_op_t *rko; +#define rd_kafka_txn_op_req(rk, op_cb, abs_timeout) \ + rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, \ + rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, op_cb), \ + abs_timeout) +#define rd_kafka_txn_op_req1(rk, rko, abs_timeout) \ + rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, rko, abs_timeout) +static rd_kafka_error_t *rd_kafka_txn_op_req0(const char *func, + int line, + rd_kafka_t *rk, + rd_kafka_op_t *rko, + rd_ts_t abs_timeout) { + rd_kafka_error_t *error = NULL; + rd_bool_t has_result = rd_false; - if (!rkq) { - if (error) - rd_kafka_error_destroy(error); - return; + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + /* See if there's already a result, if so return that immediately. */ + if (rk->rk_eos.txn_curr_api.has_result) { + error = rk->rk_eos.txn_curr_api.error; + rk->rk_eos.txn_curr_api.error = NULL; + rk->rk_eos.txn_curr_api.has_result = rd_false; + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + rd_kafka_op_destroy(rko); + rd_kafka_dbg(rk, EOS, "OPREQ", + "%s:%d: %s: returning already set result: %s", + func, line, rk->rk_eos.txn_curr_api.name, + error ? rd_kafka_error_string(error) : "Success"); + return error; } - rko = rd_kafka_op_new(RD_KAFKA_OP_TXN | RD_KAFKA_OP_REPLY); + /* Send one-way op to txnmgr */ + if (!rd_kafka_q_enq(rk->rk_ops, rko)) + RD_BUG("rk_ops queue disabled"); - if (error) { - rko->rko_error = error; - rko->rko_err = rd_kafka_error_code(error); + /* Wait for result to be set, or timeout */ + do { + if (cnd_timedwait_ms(&rk->rk_eos.txn_curr_api.cnd, + &rk->rk_eos.txn_curr_api.lock, + rd_timeout_remains(abs_timeout)) == + thrd_timedout) + break; + } while (!rk->rk_eos.txn_curr_api.has_result); + + + + if ((has_result = rk->rk_eos.txn_curr_api.has_result)) { + rk->rk_eos.txn_curr_api.has_result = rd_false; + error = rk->rk_eos.txn_curr_api.error; + rk->rk_eos.txn_curr_api.error = NULL; } - rd_kafka_q_enq(rkq, rko); + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + /* If there was no reply it means the background operation is still + * in progress and its result will be set later, so the application + * should call this API again to resume. */ + if (!has_result) { + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__TIMED_OUT, + "Timed out waiting for operation to finish, " + "retry call to resume"); + } - rd_kafka_q_destroy(rkq); + return error; } + /** - * @brief Wrapper for rd_kafka_txn_curr_api_reply_error() that takes - * an error code and format string. + * @brief Begin (or resume) a public API call. * - * @param rkq is the queue to send the reply on, which may be NULL or disabled. - * The \p rkq refcount is decreased by this function. - * @param actions Optional response actions (RD_KAFKA_ERR_ACTION_..). - * RD_KAFKA_ERR_ACTION_FATAL -> set_fatal(), - * RD_KAFKA_ERR_ACTION_PERMANENT -> set_txn_requires_abort(), - * RD_KAFKA_ERR_ACTION_RETRY -> set_retriable(), - * @param err API error code. - * @param errstr_fmt If err is set, a human readable error format string. + * This function will prevent conflicting calls. * - * @locality rdkafka main thread - * @locks any + * @returns an error on failure, or NULL on success. 
+ * + * @locality application thread + * + * @locks_acquired rk->rk_eos.txn_curr_api.lock */ -static void rd_kafka_txn_curr_api_reply(rd_kafka_q_t *rkq, - int actions, - rd_kafka_resp_err_t err, - const char *errstr_fmt, - ...) RD_FORMAT(printf, 4, 5); - -static void rd_kafka_txn_curr_api_reply(rd_kafka_q_t *rkq, - int actions, - rd_kafka_resp_err_t err, - const char *errstr_fmt, - ...) { +static rd_kafka_error_t *rd_kafka_txn_curr_api_begin(rd_kafka_t *rk, + const char *api_name, + rd_bool_t cap_timeout, + int timeout_ms, + rd_ts_t *abs_timeoutp) { rd_kafka_error_t *error = NULL; - if (err) { - va_list ap; - va_start(ap, errstr_fmt); - error = rd_kafka_error_new_v(err, errstr_fmt, ap); - va_end(ap); + if ((error = rd_kafka_ensure_transactional(rk))) + return error; + + rd_kafka_rdlock(rk); /* Need lock for retrieving the states */ + rd_kafka_dbg(rk, EOS, "TXNAPI", + "Transactional API called: %s " + "(in txn state %s, idemp state %s, API timeout %d)", + api_name, rd_kafka_txn_state2str(rk->rk_eos.txn_state), + rd_kafka_idemp_state2str(rk->rk_eos.idemp_state), + timeout_ms); + rd_kafka_rdunlock(rk); + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + + /* Make sure there is no other conflicting in-progress API call, + * and that this same call is not currently under way in another thread. + */ + if (unlikely(*rk->rk_eos.txn_curr_api.name && + strcmp(rk->rk_eos.txn_curr_api.name, api_name))) { + /* Another API is being called. */ + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__CONFLICT, + "Conflicting %s API call is already in progress", + rk->rk_eos.txn_curr_api.name); + + } else if (unlikely(rk->rk_eos.txn_curr_api.calling)) { + /* There is an active call to this same API + * from another thread. */ + error = rd_kafka_error_new_retriable( + RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS, + "Simultaneous %s API calls not allowed", + rk->rk_eos.txn_curr_api.name); + + } else if (*rk->rk_eos.txn_curr_api.name) { + /* Resumed call */ + rk->rk_eos.txn_curr_api.calling = rd_true; + + } else { + /* New call */ + rd_snprintf(rk->rk_eos.txn_curr_api.name, + sizeof(rk->rk_eos.txn_curr_api.name), "%s", + api_name); + rk->rk_eos.txn_curr_api.calling = rd_true; + rd_assert(!rk->rk_eos.txn_curr_api.error); + } + + if (!error && abs_timeoutp) { + rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); + + if (cap_timeout) { + /* Cap API timeout to remaining transaction timeout */ + rd_ts_t abs_txn_timeout = + rd_kafka_txn_current_timeout(rk); + if (abs_timeout > abs_txn_timeout || + abs_timeout == RD_POLL_INFINITE) + abs_timeout = abs_txn_timeout; + } + + *abs_timeoutp = abs_timeout; + } + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + return error; +} + + + +/** + * @brief Return from public API. + * + * This function updates the current API state and must be used in + * all return statements from the public txn API. + * + * @param resumable If true and the error is retriable, the current API state + * will be maintained to allow a future call to the same API + * to resume the background operation that is in progress. + * @param error The error object, if not NULL, is simply inspected and returned. + * + * @returns the \p error object as-is. 
+ * + * @locality application thread + * @locks_acquired rk->rk_eos.txn_curr_api.lock + */ +#define rd_kafka_txn_curr_api_return(rk, resumable, error) \ + rd_kafka_txn_curr_api_return0(__FUNCTION__, __LINE__, rk, resumable, \ + error) +static rd_kafka_error_t * +rd_kafka_txn_curr_api_return0(const char *func, + int line, + rd_kafka_t *rk, + rd_bool_t resumable, + rd_kafka_error_t *error) { + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + rd_kafka_dbg( + rk, EOS, "TXNAPI", "Transactional API %s return%s at %s:%d: %s", + rk->rk_eos.txn_curr_api.name, + resumable && rd_kafka_error_is_retriable(error) ? " resumable" : "", + func, line, error ? rd_kafka_error_string(error) : "Success"); + + rd_assert(*rk->rk_eos.txn_curr_api.name); + rd_assert(rk->rk_eos.txn_curr_api.calling); + + rk->rk_eos.txn_curr_api.calling = rd_false; + + /* Reset the current API call so that other APIs may be called, + * unless this is a resumable API and the error is retriable. */ + if (!resumable || (error && !rd_kafka_error_is_retriable(error))) { + *rk->rk_eos.txn_curr_api.name = '\0'; + /* It is possible for another error to have been set, + * typically when a fatal error is raised, so make sure + * we're not destroying the error we're supposed to return. */ + if (rk->rk_eos.txn_curr_api.error != error) + rd_kafka_error_destroy(rk->rk_eos.txn_curr_api.error); + rk->rk_eos.txn_curr_api.error = NULL; + } + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + return error; +} + + + +/** + * @brief Set the (possibly intermediary) result for the current API call. + * + * The result is \p error NULL for success or \p error object on failure. + * If the application is actively blocked on the call the result will be + * sent on its replyq, otherwise the result will be stored for future retrieval + * the next time the application calls the API again. + * + * @locality rdkafka main thread + * @locks_acquired rk->rk_eos.txn_curr_api.lock + */ +static void rd_kafka_txn_curr_api_set_result0(const char *func, + int line, + rd_kafka_t *rk, + int actions, + rd_kafka_error_t *error) { + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + + if (!*rk->rk_eos.txn_curr_api.name) { + /* No current API being called, this could happen + * if the application thread API deemed the API was done, + * or for fatal errors that attempt to set the result + * regardless of current API state. + * In this case we simply throw away this result. */ + if (error) + rd_kafka_error_destroy(error); + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + return; + } + + rd_kafka_dbg(rk, EOS, "APIRESULT", + "Transactional API %s (intermediary%s) result set " + "at %s:%d: %s (%sprevious result%s%s)", + rk->rk_eos.txn_curr_api.name, + rk->rk_eos.txn_curr_api.calling ? ", calling" : "", func, + line, error ? rd_kafka_error_string(error) : "Success", + rk->rk_eos.txn_curr_api.has_result ? "" : "no ", + rk->rk_eos.txn_curr_api.error ? ": " : "", + rd_kafka_error_string(rk->rk_eos.txn_curr_api.error)); + + rk->rk_eos.txn_curr_api.has_result = rd_true; + + + if (rk->rk_eos.txn_curr_api.error) { + /* If there's already an error it typically means + * a fatal error has been raised, so nothing more to do here. 
*/ + rd_kafka_dbg( + rk, EOS, "APIRESULT", + "Transactional API %s error " + "already set: %s", + rk->rk_eos.txn_curr_api.name, + rd_kafka_error_string(rk->rk_eos.txn_curr_api.error)); + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + if (error) + rd_kafka_error_destroy(error); + + return; + } + + if (error) { if (actions & RD_KAFKA_ERR_ACTION_FATAL) rd_kafka_error_set_fatal(error); else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) @@ -449,7 +700,12 @@ rd_kafka_error_set_retriable(error); } - rd_kafka_txn_curr_api_reply_error(rkq, error); + rk->rk_eos.txn_curr_api.error = error; + error = NULL; + cnd_broadcast(&rk->rk_eos.txn_curr_api.cnd); + + + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); } @@ -463,53 +719,36 @@ */ void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk, rd_kafka_idemp_state_t idemp_state) { - rd_bool_t reply_assigned = rd_false; + rd_bool_t set_result = rd_false; if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED && rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_WAIT_PID) { /* Application is calling (or has called) init_transactions() */ RD_UT_COVERAGE(1); rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED); - reply_assigned = rd_true; + set_result = rd_true; } else if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED && - rk->rk_eos.txn_state == - RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) { + (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_BEGIN_ABORT || + rk->rk_eos.txn_state == + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION)) { /* Application is calling abort_transaction() as we're * recovering from a fatal idempotence error. */ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); - reply_assigned = rd_true; + set_result = rd_true; } else if (idemp_state == RD_KAFKA_IDEMP_STATE_FATAL_ERROR && rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_FATAL_ERROR) { /* A fatal error has been raised. */ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR); - if (rk->rk_eos.txn_init_rkq) { - /* Application has called init_transactions() or - * abort_transaction() and it has now failed, - * reply to the app. */ - rd_kafka_txn_curr_api_reply_error( - rk->rk_eos.txn_init_rkq, - rd_kafka_error_new_fatal( - rk->rk_eos.txn_err ? rk->rk_eos.txn_err - : RD_KAFKA_RESP_ERR__FATAL, - "Fatal error raised by " - "idempotent producer while " - "retrieving PID: %s", - rk->rk_eos.txn_errstr ? rk->rk_eos.txn_errstr - : "see previous logs")); - rk->rk_eos.txn_init_rkq = NULL; - } } - if (reply_assigned && rk->rk_eos.txn_init_rkq) { + if (set_result) { /* Application has called init_transactions() or * abort_transaction() and it is now complete, * reply to the app. 
*/ - rd_kafka_txn_curr_api_reply(rk->rk_eos.txn_init_rkq, 0, - RD_KAFKA_RESP_ERR_NO_ERROR, NULL); - rk->rk_eos.txn_init_rkq = NULL; + rd_kafka_txn_curr_api_set_result(rk, 0, NULL); } } @@ -579,6 +818,7 @@ int actions = 0; int retry_backoff_ms = 500; /* retry backoff */ rd_kafka_resp_err_t reset_coord_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_bool_t require_bump = rd_false; if (err) goto done; @@ -681,6 +921,7 @@ case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID: case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING: + require_bump = rd_true; p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT; err = ErrorCode; request_error = rd_true; @@ -819,13 +1060,22 @@ rd_kafka_err2str(err)); } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) { - /* Treat all other permanent errors as abortable errors */ - rd_kafka_txn_set_abortable_error( - rk, err, - "Failed to add partition(s) to transaction " - "on broker %s: %s (after %d ms)", - rd_kafka_broker_name(rkb), rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent / 1000)); + /* Treat all other permanent errors as abortable errors. + * If an epoch bump is required let idempo sort it out. */ + if (require_bump) + rd_kafka_idemp_drain_epoch_bump( + rk, err, + "Failed to add partition(s) to transaction " + "on broker %s: %s (after %d ms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000)); + else + rd_kafka_txn_set_abortable_error( + rk, err, + "Failed to add partition(s) to transaction " + "on broker %s: %s (after %d ms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000)); } else { /* Schedule registration of any new or remaining partitions */ @@ -865,7 +1115,7 @@ } /* Get pid, checked later */ - pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); rd_kafka_rdunlock(rk); @@ -1000,247 +1250,6 @@ /** - * @brief Op timeout callback which fails the current transaction. - * - * @locality rdkafka main thread - * @locks none - */ -static void rd_kafka_txn_curr_api_abort_timeout_cb(rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_q_t *rkq = arg; - - rd_kafka_txn_set_abortable_error( - rkts->rkts_rk, RD_KAFKA_RESP_ERR__TIMED_OUT, - "Transactional API operation (%s) timed out", - rkq->rkq_rk->rk_eos.txn_curr_api.name); - - rd_kafka_txn_curr_api_reply_error( - rkq, rd_kafka_error_new_txn_requires_abort( - RD_KAFKA_RESP_ERR__TIMED_OUT, - "Transactional API operation (%s) timed out", - rkq->rkq_rk->rk_eos.txn_curr_api.name)); -} - -/** - * @brief Op timeout callback which does not fail the current transaction, - * and sets the retriable flag on the error. - * - * @locality rdkafka main thread - * @locks none - */ -static void rd_kafka_txn_curr_api_retriable_timeout_cb(rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_q_t *rkq = arg; - - rd_kafka_txn_curr_api_reply_error( - rkq, - rd_kafka_error_new_retriable(RD_KAFKA_RESP_ERR__TIMED_OUT, - "Transactional operation timed out")); -} - - -/** - * @brief Op timeout callback which does not fail the current transaction. - * - * @locality rdkafka main thread - * @locks none - */ -static void rd_kafka_txn_curr_api_timeout_cb(rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_q_t *rkq = arg; - - rd_kafka_txn_curr_api_reply(rkq, 0, RD_KAFKA_RESP_ERR__TIMED_OUT, - "Transactional operation timed out"); -} - -/** - * @brief Op timeout callback for init_transactions() that uses the - * the last txn_init_err as error code. 
- * - * @locality rdkafka main thread - * @locks none - */ -static void rd_kafka_txn_curr_api_init_timeout_cb(rd_kafka_timers_t *rkts, - void *arg) { - rd_kafka_q_t *rkq = arg; - rd_kafka_error_t *error; - rd_kafka_resp_err_t err = rkts->rkts_rk->rk_eos.txn_init_err; - - if (!err) - err = RD_KAFKA_RESP_ERR__TIMED_OUT; - - error = rd_kafka_error_new(err, "Failed to initialize Producer ID: %s", - rd_kafka_err2str(err)); - - /* init_transactions() timeouts are retriable */ - if (err == RD_KAFKA_RESP_ERR__TIMED_OUT || - err == RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) - rd_kafka_error_set_retriable(error); - - rd_kafka_txn_curr_api_reply_error(rkq, error); -} - - - -/** - * @brief Reset the current API, typically because it was completed - * without timeout. - * - * @param for_reuse If true there will be a sub-sequent curr_api_req - * for the same API. E.g., the op_commit_transaction - * following the op_begin_commit_transaction(). - * - * @locality rdkafka main thread - * @locks rd_kafka_wrlock(rk) MUST be held - */ -static void rd_kafka_txn_curr_api_reset(rd_kafka_t *rk, rd_bool_t for_reuse) { - rd_bool_t timer_was_stopped; - rd_kafka_q_t *rkq; - - /* Always stop timer and loose refcnt to reply queue. */ - rkq = rk->rk_eos.txn_curr_api.tmr.rtmr_arg; - timer_was_stopped = rd_kafka_timer_stop( - &rk->rk_timers, &rk->rk_eos.txn_curr_api.tmr, RD_DO_LOCK); - - if (rkq && timer_was_stopped) { - /* Remove the stopped timer's reply queue reference - * since the timer callback will not have fired if - * we stopped the timer. */ - rd_kafka_q_destroy(rkq); - } - - /* Don't reset current API if it is to be reused */ - if (for_reuse) - return; - - *rk->rk_eos.txn_curr_api.name = '\0'; - rk->rk_eos.txn_curr_api.flags = 0; -} - - -/** - * @brief Sets the current API op (representing a blocking application API call) - * and a timeout for the same, and sends the op to the transaction - * manager thread (rdkafka main thread) for processing. - * - * If the timeout expires the rko will fail with ERR__TIMED_OUT - * and the txnmgr state will be adjusted according to \p abort_on_timeout: - * if true, the txn will transition to ABORTABLE_ERROR, else remain in - * the current state. - * - * This call will block until a response is received from the rdkafka - * main thread. - * - * Use rd_kafka_txn_curr_api_reset() when operation finishes prior - * to the timeout. - * - * @param rko Op to send to txnmgr. - * @param flags See RD_KAFKA_TXN_CURR_API_F_.. flags in rdkafka_int.h. - * - * @returns an error, or NULL on success. - * - * @locality application thread - * @locks none - */ -static rd_kafka_error_t *rd_kafka_txn_curr_api_req(rd_kafka_t *rk, - const char *name, - rd_kafka_op_t *rko, - int timeout_ms, - int flags) { - rd_kafka_op_t *reply; - rd_bool_t reuse = rd_false; - rd_bool_t for_reuse; - rd_kafka_q_t *tmpq = NULL; - rd_kafka_error_t *error = NULL; - - /* Strip __FUNCTION__ name's rd_kafka_ prefix since it will - * not make sense in high-level language bindings. */ - if (!strncmp(name, "rd_kafka_", strlen("rd_kafka_"))) - name += strlen("rd_kafka_"); - - if (flags & RD_KAFKA_TXN_CURR_API_F_REUSE) { - /* Reuse the current API call state. 
*/ - flags &= ~RD_KAFKA_TXN_CURR_API_F_REUSE; - reuse = rd_true; - } - - rd_kafka_wrlock(rk); - - rd_kafka_dbg(rk, EOS, "TXNAPI", - "Transactional API called: %s " - "(in txn state %s, idemp state %s)", - name, rd_kafka_txn_state2str(rk->rk_eos.txn_state), - rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); - - /* First set for_reuse to the current flags to match with - * the passed flags. */ - for_reuse = !!(rk->rk_eos.txn_curr_api.flags & - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE); - - if ((for_reuse && !reuse) || - (!for_reuse && *rk->rk_eos.txn_curr_api.name)) { - error = rd_kafka_error_new( - RD_KAFKA_RESP_ERR__STATE, - "Conflicting %s call already in progress", - rk->rk_eos.txn_curr_api.name); - rd_kafka_wrunlock(rk); - rd_kafka_op_destroy(rko); - return error; - } - - rd_assert(for_reuse == reuse); - - rd_snprintf(rk->rk_eos.txn_curr_api.name, - sizeof(rk->rk_eos.txn_curr_api.name), "%s", name); - - tmpq = rd_kafka_q_new(rk); - - rk->rk_eos.txn_curr_api.flags |= flags; - - /* Then update for_reuse to the passed flags so that - * api_reset() will not reset curr APIs that are to be reused, - * but a sub-sequent _F_REUSE call will reset it. */ - for_reuse = !!(flags & RD_KAFKA_TXN_CURR_API_F_FOR_REUSE); - - /* If no timeout has been specified, use the transaction.timeout.ms */ - if (timeout_ms < 0) - timeout_ms = rk->rk_conf.eos.transaction_timeout_ms; - - if (timeout_ms >= 0) { - rd_kafka_q_keep(tmpq); - rd_kafka_timer_start_oneshot( - &rk->rk_timers, &rk->rk_eos.txn_curr_api.tmr, rd_true, - timeout_ms * 1000, - !strcmp(name, "init_transactions") - ? rd_kafka_txn_curr_api_init_timeout_cb - : (flags & RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT - ? rd_kafka_txn_curr_api_abort_timeout_cb - : (flags & RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT - ? rd_kafka_txn_curr_api_retriable_timeout_cb - : rd_kafka_txn_curr_api_timeout_cb)), - tmpq); - } - rd_kafka_wrunlock(rk); - - /* Send op to rdkafka main thread and wait for reply */ - reply = rd_kafka_op_req0(rk->rk_ops, tmpq, rko, RD_POLL_INFINITE); - - rd_kafka_q_destroy_owner(tmpq); - - if ((error = reply->rko_error)) { - reply->rko_error = NULL; - for_reuse = rd_false; - } - - rd_kafka_op_destroy(reply); - - rd_kafka_txn_curr_api_reset(rk, for_reuse); - - return error; -} - - -/** * @brief Async handler for init_transactions() * * @locks none @@ -1255,47 +1264,35 @@ return RD_KAFKA_OP_RES_HANDLED; rd_kafka_wrlock(rk); + if ((error = rd_kafka_txn_require_state( rk, RD_KAFKA_TXN_STATE_INIT, RD_KAFKA_TXN_STATE_WAIT_PID, RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) { rd_kafka_wrunlock(rk); - goto done; - } + rd_kafka_txn_curr_api_set_result(rk, 0, error); - if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_READY_NOT_ACKED) { + } else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_READY_NOT_ACKED) { /* A previous init_transactions() called finished successfully * after timeout, the application has called init_transactions() * again, we do nothin here, ack_init_transactions() will * transition the state from READY_NOT_ACKED to READY. */ rd_kafka_wrunlock(rk); - goto done; - } - /* Possibly a no-op if already in WAIT_PID state */ - rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_WAIT_PID); - - /* Destroy previous reply queue for a previously timed out - * init_transactions() call. */ - if (rk->rk_eos.txn_init_rkq) - rd_kafka_q_destroy(rk->rk_eos.txn_init_rkq); - - /* Grab a separate reference to use in state_change(), - * outside the curr_api to allow the curr_api to timeout while - * the background init continues. 
*/ - rk->rk_eos.txn_init_rkq = rd_kafka_q_keep(rko->rko_replyq.q); + } else { - rd_kafka_wrunlock(rk); + /* Possibly a no-op if already in WAIT_PID state */ + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_WAIT_PID); - rk->rk_eos.txn_init_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rk->rk_eos.txn_init_err = RD_KAFKA_RESP_ERR_NO_ERROR; - /* Start idempotent producer to acquire PID */ - rd_kafka_idemp_start(rk, rd_true /*immediately*/); + rd_kafka_wrunlock(rk); - return RD_KAFKA_OP_RES_HANDLED; + /* Start idempotent producer to acquire PID */ + rd_kafka_idemp_start(rk, rd_true /*immediately*/); -done: - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + /* Do not call curr_api_set_result, it will be triggered from + * idemp_state_change() when the PID has been retrieved. */ + } return RD_KAFKA_OP_RES_HANDLED; } @@ -1318,20 +1315,14 @@ return RD_KAFKA_OP_RES_HANDLED; rd_kafka_wrlock(rk); - if ((error = rd_kafka_txn_require_state( - rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) { - rd_kafka_wrunlock(rk); - goto done; - } - rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY); + if (!(error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY); rd_kafka_wrunlock(rk); - /* FALLTHRU */ -done: - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } @@ -1340,13 +1331,26 @@ rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms) { rd_kafka_error_t *error; + rd_ts_t abs_timeout; - if ((error = rd_kafka_ensure_transactional(rk))) + /* Cap actual timeout to transaction.timeout.ms * 2 when an infinite + * timeout is provided, this is to make sure the call doesn't block + * indefinitely in case a coordinator is not available. + * This is only needed for init_transactions() since there is no + * coordinator to time us out yet. */ + if (timeout_ms == RD_POLL_INFINITE && + /* Avoid overflow */ + rk->rk_conf.eos.transaction_timeout_ms < INT_MAX / 2) + timeout_ms = rk->rk_conf.eos.transaction_timeout_ms * 2; + + if ((error = rd_kafka_txn_curr_api_begin(rk, "init_transactions", + rd_false /* no cap */, + timeout_ms, &abs_timeout))) return error; /* init_transactions() will continue to operate in the background * if the timeout expires, and the application may call - * init_transactions() again to "continue" with the initialization + * init_transactions() again to resume the initialization * process. * For this reason we need two states: * - TXN_STATE_READY_NOT_ACKED for when initialization is done @@ -1360,25 +1364,42 @@ * thread (to keep txn_state synchronization in one place). */ /* First call is to trigger initialization */ - error = rd_kafka_txn_curr_api_req( - rk, __FUNCTION__, - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_init_transactions), - timeout_ms, - RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT | - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE); - if (error) - return error; + if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_init_transactions, + abs_timeout))) { + if (rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR__TIMED_OUT) { + /* See if there's a more meaningful txn_init_err set + * by idempo that we can return. 
*/ + rd_kafka_resp_err_t err; + rd_kafka_rdlock(rk); + err = + rd_kafka_txn_normalize_err(rk->rk_eos.txn_init_err); + rd_kafka_rdunlock(rk); + + if (err && err != RD_KAFKA_RESP_ERR__TIMED_OUT) { + rd_kafka_error_destroy(error); + error = rd_kafka_error_new_retriable( + err, "Failed to initialize Producer ID: %s", + rd_kafka_err2str(err)); + } + } + + return rd_kafka_txn_curr_api_return(rk, rd_true, error); + } /* Second call is to transition from READY_NOT_ACKED -> READY, * if necessary. */ - return rd_kafka_txn_curr_api_req( - rk, __FUNCTION__, - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_ack_init_transactions), - RD_POLL_INFINITE, /* immediate, no timeout needed */ - RD_KAFKA_TXN_CURR_API_F_REUSE); + error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_ack_init_transactions, + /* Timeout must be infinite since this is + * a synchronization point. + * The call is immediate though, so this + * will not block. */ + RD_POLL_INFINITE); + + return rd_kafka_txn_curr_api_return(rk, + /* not resumable at this point */ + rd_false, error); } @@ -1422,32 +1443,24 @@ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT, "begin transaction"); - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk) { - rd_kafka_op_t *reply; rd_kafka_error_t *error; - if ((error = rd_kafka_ensure_transactional(rk))) + if ((error = rd_kafka_txn_curr_api_begin(rk, "begin_transaction", + rd_false, 0, NULL))) return error; - reply = rd_kafka_op_req( - rk->rk_ops, - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_begin_transaction), - RD_POLL_INFINITE); + error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_transaction, + RD_POLL_INFINITE); - if ((error = reply->rko_error)) - reply->rko_error = NULL; - - rd_kafka_op_destroy(reply); - - return error; + return rd_kafka_txn_curr_api_return(rk, rd_false /*not resumable*/, + error); } @@ -1478,10 +1491,6 @@ *errstr = '\0'; - if (err != RD_KAFKA_RESP_ERR__DESTROY && - !rd_kafka_q_ready(rko->rko_replyq.q)) - err = RD_KAFKA_RESP_ERR__OUTDATED; - if (err) goto done; @@ -1535,8 +1544,9 @@ case RD_KAFKA_RESP_ERR__DESTROY: /* Producer is being terminated, ignore the response. */ case RD_KAFKA_RESP_ERR__OUTDATED: - /* Set a non-actionable actions flag so that curr_api_reply() - * is called below, without other side-effects. */ + /* Set a non-actionable actions flag so that + * curr_api_set_result() is called below, without + * other side-effects. 
*/ actions = RD_KAFKA_ERR_ACTION_SPECIAL; return; @@ -1597,6 +1607,7 @@ rk, RD_KAFKA_COORD_GROUP, rko->rko_u.txn.cgmetadata->group_id, rd_kafka_txn_send_TxnOffsetCommitRequest, rko, + 500 /* 500ms delay before retrying */, rd_timeout_remains_limit0( remains_ms, rk->rk_conf.socket_timeout_ms), RD_KAFKA_REPLYQ(rk->rk_ops, 0), @@ -1611,12 +1622,10 @@ rd_kafka_txn_set_abortable_error(rk, err, "%s", errstr); if (err) - rd_kafka_txn_curr_api_reply(rd_kafka_q_keep(rko->rko_replyq.q), - actions, err, "%s", errstr); + rd_kafka_txn_curr_api_set_result( + rk, actions, rd_kafka_error_new(err, "%s", errstr)); else - rd_kafka_txn_curr_api_reply(rd_kafka_q_keep(rko->rko_replyq.q), - 0, RD_KAFKA_RESP_ERR_NO_ERROR, - NULL); + rd_kafka_txn_curr_api_set_result(rk, 0, NULL); rd_kafka_op_destroy(rko); } @@ -1652,7 +1661,7 @@ return RD_KAFKA_RESP_ERR__STATE; } - pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK); + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); rd_kafka_rdunlock(rk); if (!rd_kafka_pid_valid(pid)) { /* Do not free the rko, it is passed as the reply_opaque @@ -1743,9 +1752,6 @@ return; } - if (!rd_kafka_q_ready(rko->rko_replyq.q)) - err = RD_KAFKA_RESP_ERR__OUTDATED; - if (err) goto done; @@ -1765,7 +1771,6 @@ } remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout); - if (rd_timeout_expired(remains_ms) && !err) err = RD_KAFKA_RESP_ERR__TIMED_OUT; @@ -1776,8 +1781,9 @@ case RD_KAFKA_RESP_ERR__DESTROY: /* Producer is being terminated, ignore the response. */ case RD_KAFKA_RESP_ERR__OUTDATED: - /* Set a non-actionable actions flag so that curr_api_reply() - * is called below, without other side-effects. */ + /* Set a non-actionable actions flag so that + * curr_api_set_result() is called below, without + * other side-effects. */ actions = RD_KAFKA_ERR_ACTION_SPECIAL; break; @@ -1844,13 +1850,13 @@ rd_kafka_txn_coord_timer_start(rk, 50); if (actions & RD_KAFKA_ERR_ACTION_RETRY) { - rd_rkb_dbg(rkb, EOS, "ADDOFFSETS", - "Failed to add offsets to transaction on " - "broker %s: %s (after %dms): " - "error is retriable", - rd_kafka_broker_name(rkb), - rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent / 1000)); + rd_rkb_dbg( + rkb, EOS, "ADDOFFSETS", + "Failed to add offsets to transaction on " + "broker %s: %s (after %dms, %dms remains): " + "error is retriable", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000), remains_ms); if (!rd_timeout_expired(remains_ms) && rd_kafka_buf_retry(rk->rk_eos.txn_coord, request)) { @@ -1887,6 +1893,7 @@ rk, RD_KAFKA_COORD_GROUP, rko->rko_u.txn.cgmetadata->group_id, rd_kafka_txn_send_TxnOffsetCommitRequest, rko, + 0 /* no delay */, rd_timeout_remains_limit0(remains_ms, rk->rk_conf.socket_timeout_ms), RD_KAFKA_REPLYQ(rk->rk_ops, 0), @@ -1894,12 +1901,14 @@ } else { - rd_kafka_txn_curr_api_reply( - rd_kafka_q_keep(rko->rko_replyq.q), actions, err, - "Failed to add offsets to transaction on broker %s: " - "%s (after %dms)", - rd_kafka_broker_name(rkb), rd_kafka_err2str(err), - (int)(request->rkbuf_ts_sent / 1000)); + rd_kafka_txn_curr_api_set_result( + rk, actions, + rd_kafka_error_new( + err, + "Failed to add offsets to transaction on " + "broker %s: %s (after %dms)", + rd_kafka_broker_name(rkb), rd_kafka_err2str(err), + (int)(request->rkbuf_ts_sent / 1000))); rd_kafka_op_destroy(rko); } @@ -1936,7 +1945,7 @@ rd_kafka_wrunlock(rk); - pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); if (!rd_kafka_pid_valid(pid)) { rd_dassert(!*"BUG: No PID 
despite proper transaction state"); error = rd_kafka_error_new_retriable( @@ -1966,8 +1975,7 @@ return RD_KAFKA_OP_RES_KEEP; /* the rko is passed to AddOffsetsToTxn */ err: - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } @@ -1984,15 +1992,20 @@ rd_kafka_error_t *error; rd_kafka_op_t *rko; rd_kafka_topic_partition_list_t *valid_offsets; - - if ((error = rd_kafka_ensure_transactional(rk))) - return error; + rd_ts_t abs_timeout; if (!cgmetadata || !offsets) return rd_kafka_error_new( RD_KAFKA_RESP_ERR__INVALID_ARG, "cgmetadata and offsets are required parameters"); + if ((error = rd_kafka_txn_curr_api_begin( + rk, "send_offsets_to_transaction", + /* Cap timeout to txn timeout */ + rd_true, timeout_ms, &abs_timeout))) + return error; + + valid_offsets = rd_kafka_topic_partition_list_match( offsets, rd_kafka_topic_partition_match_valid_offset, NULL); @@ -2000,7 +2013,7 @@ /* No valid offsets, e.g., nothing was consumed, * this is not an error, do nothing. */ rd_kafka_topic_partition_list_destroy(valid_offsets); - return NULL; + return rd_kafka_txn_curr_api_return(rk, rd_false, NULL); } rd_kafka_topic_partition_list_sort_by_topic(valid_offsets); @@ -2010,14 +2023,12 @@ rko->rko_u.txn.offsets = valid_offsets; rko->rko_u.txn.cgmetadata = rd_kafka_consumer_group_metadata_dup(cgmetadata); - if (timeout_ms > rk->rk_conf.eos.transaction_timeout_ms) - timeout_ms = rk->rk_conf.eos.transaction_timeout_ms; - rko->rko_u.txn.abs_timeout = rd_timeout_init(timeout_ms); - - return rd_kafka_txn_curr_api_req( - rk, __FUNCTION__, rko, - RD_POLL_INFINITE, /* rely on background code to time out */ - RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT); + rko->rko_u.txn.abs_timeout = abs_timeout; + + /* Timeout is enforced by op_send_offsets_to_transaction() */ + error = rd_kafka_txn_op_req1(rk, rko, RD_POLL_INFINITE); + + return rd_kafka_txn_curr_api_return(rk, rd_false, error); } @@ -2045,6 +2056,53 @@ } +/** + * @brief EndTxn (commit or abort of transaction on the coordinator) is done, + * or was skipped. + * Continue with next steps (if any) before completing the local + * transaction state. + * + * @locality rdkafka main thread + * @locks_acquired rd_kafka_wrlock(rk), rk->rk_eos.txn_curr_api.lock + */ +static void rd_kafka_txn_endtxn_complete(rd_kafka_t *rk) { + rd_bool_t is_commit; + + mtx_lock(&rk->rk_eos.txn_curr_api.lock); + is_commit = !strcmp(rk->rk_eos.txn_curr_api.name, "commit_transaction"); + mtx_unlock(&rk->rk_eos.txn_curr_api.lock); + + rd_kafka_wrlock(rk); + + /* If an epoch bump is required, let idempo handle it. + * When the bump is finished we'll be notified through + * idemp_state_change() and we can complete the local transaction state + * and set the final API call result. + * If the bumping fails a fatal error will be raised. */ + if (rk->rk_eos.txn_requires_epoch_bump) { + rd_kafka_resp_err_t bump_err = rk->rk_eos.txn_err; + rd_dassert(!is_commit); + + rd_kafka_wrunlock(rk); + + /* After the epoch bump is done we'll be transitioned + * to the next state. 
*/ + rd_kafka_idemp_drain_epoch_bump0( + rk, rd_false /* don't allow txn abort */, bump_err, + "Transaction aborted: %s", rd_kafka_err2str(bump_err)); + return; + } + + if (is_commit) + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED); + else + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); + + rd_kafka_wrunlock(rk); + + rd_kafka_txn_curr_api_set_result(rk, 0, NULL); +} + /** * @brief Handle EndTxnResponse (commit or abort) @@ -2059,15 +2117,12 @@ rd_kafka_buf_t *request, void *opaque) { const int log_decode_errors = LOG_ERR; - rd_kafka_q_t *rkq = opaque; int16_t ErrorCode; int actions = 0; - rd_bool_t is_commit, may_retry = rd_false; + rd_bool_t is_commit, may_retry = rd_false, require_bump = rd_false; - if (err == RD_KAFKA_RESP_ERR__DESTROY) { - rd_kafka_q_destroy(rkq); + if (err == RD_KAFKA_RESP_ERR__DESTROY) return; - } is_commit = request->rkbuf_u.EndTxn.commit; @@ -2100,38 +2155,43 @@ * This is a tricky state since the transaction will have * failed locally but the EndTxn(commit) may have succeeded. */ - rd_kafka_wrunlock(rk); if (err) { - rd_kafka_txn_curr_api_reply( - rkq, RD_KAFKA_ERR_ACTION_PERMANENT, - rk->rk_eos.txn_err, - "EndTxn failed with %s but transaction " - "had already failed due to: %s", - rd_kafka_err2name(err), rk->rk_eos.txn_errstr); + rd_kafka_txn_curr_api_set_result( + rk, RD_KAFKA_ERR_ACTION_PERMANENT, + rd_kafka_error_new( + rk->rk_eos.txn_err, + "EndTxn failed with %s but transaction " + "had already failed due to: %s", + rd_kafka_err2name(err), rk->rk_eos.txn_errstr)); } else { /* If the transaction has failed locally but * this EndTxn commit succeeded we'll raise * a fatal error. */ if (is_commit) - rd_kafka_txn_curr_api_reply( - rkq, RD_KAFKA_ERR_ACTION_FATAL, - rk->rk_eos.txn_err, - "Transaction commit succeeded on the " - "broker but the transaction " - "had already failed locally due to: %s", - rk->rk_eos.txn_errstr); + rd_kafka_txn_curr_api_set_result( + rk, RD_KAFKA_ERR_ACTION_FATAL, + rd_kafka_error_new( + rk->rk_eos.txn_err, + "Transaction commit succeeded on the " + "broker but the transaction " + "had already failed locally due to: %s", + rk->rk_eos.txn_errstr)); else - rd_kafka_txn_curr_api_reply( - rkq, RD_KAFKA_ERR_ACTION_PERMANENT, - rk->rk_eos.txn_err, - "Transaction abort succeeded on the " - "broker but the transaction" - "had already failed locally due to: %s", - rk->rk_eos.txn_errstr); + rd_kafka_txn_curr_api_set_result( + rk, RD_KAFKA_ERR_ACTION_PERMANENT, + rd_kafka_error_new( + rk->rk_eos.txn_err, + "Transaction abort succeeded on the " + "broker but the transaction" + "had already failed locally due to: %s", + rk->rk_eos.txn_errstr)); } + rd_kafka_wrunlock(rk); + + return; } else if (!err) { @@ -2146,16 +2206,6 @@ rd_kafka_txn_state2str(rk->rk_eos.txn_state), RD_STR_ToF(may_retry)); - if (!err) { - /* EndTxn successful */ - if (is_commit) - rd_kafka_txn_set_state( - rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED); - else - rd_kafka_txn_set_state( - rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); - } - rd_kafka_wrunlock(rk); switch (err) { @@ -2164,14 +2214,14 @@ case RD_KAFKA_RESP_ERR__DESTROY: /* Producer is being terminated, ignore the response. */ - case RD_KAFKA_RESP_ERR__TIMED_OUT: - /* Transaction API timeout has been hit - * (this is our internal timer) */ case RD_KAFKA_RESP_ERR__OUTDATED: /* Transactional state no longer relevant for this * outdated response. 
*/ break; - + case RD_KAFKA_RESP_ERR__TIMED_OUT: + case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE: + /* Request timeout */ + /* FALLTHRU */ case RD_KAFKA_RESP_ERR__TRANSPORT: actions |= RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH; @@ -2194,6 +2244,7 @@ case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID: case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING: actions |= RD_KAFKA_ERR_ACTION_PERMANENT; + require_bump = rd_true; break; case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH: @@ -2219,24 +2270,52 @@ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) rd_kafka_txn_coord_timer_start(rk, 50); - if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) - rd_kafka_txn_set_abortable_error( - rk, err, + if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) { + if (require_bump && !is_commit) { + /* Abort failed to due invalid PID, starting + * with KIP-360 we can have idempo sort out + * epoch bumping. + * When the epoch has been bumped we'll detect + * the idemp_state_change and complete the + * current API call. */ + rd_kafka_idemp_drain_epoch_bump0( + rk, + /* don't allow txn abort */ + rd_false, err, "EndTxn %s failed: %s", + is_commit ? "commit" : "abort", + rd_kafka_err2str(err)); + return; + } + + /* For aborts we need to revert the state back to + * BEGIN_ABORT so that the abort can be retried from + * the beginning in op_abort_transaction(). */ + rd_kafka_wrlock(rk); + if (rk->rk_eos.txn_state == + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) + rd_kafka_txn_set_state( + rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT); + rd_kafka_wrunlock(rk); + + rd_kafka_txn_set_abortable_error0( + rk, err, require_bump, "Failed to end transaction: " "%s", rd_kafka_err2str(err)); - else if (may_retry && actions & RD_KAFKA_ERR_ACTION_RETRY && - rd_kafka_buf_retry(rkb, request)) + + } else if (may_retry && actions & RD_KAFKA_ERR_ACTION_RETRY && + rd_kafka_buf_retry(rkb, request)) return; } if (err) - rd_kafka_txn_curr_api_reply( - rkq, actions, err, "EndTxn %s failed: %s", - is_commit ? "commit" : "abort", rd_kafka_err2str(err)); + rd_kafka_txn_curr_api_set_result( + rk, actions, + rd_kafka_error_new(err, "EndTxn %s failed: %s", + is_commit ? "commit" : "abort", + rd_kafka_err2str(err))); else - rd_kafka_txn_curr_api_reply(rkq, 0, RD_KAFKA_RESP_ERR_NO_ERROR, - NULL); + rd_kafka_txn_endtxn_complete(rk); } @@ -2264,15 +2343,24 @@ if ((error = rd_kafka_txn_require_state( rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT, + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) goto done; if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED) { - /* A previous call to commit_transaction() timed out but - * the committing completed since then, we still need to wait - * for the application to call commit_transaction() again - * to synchronize state, and it just did. */ + /* A previous call to commit_transaction() timed out but the + * commit completed since then, we still + * need to wait for the application to call commit_transaction() + * again to resume the call, and it just did. */ goto done; + } else if (rk->rk_eos.txn_state == + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) { + /* A previous call to commit_transaction() timed out but the + * commit is still in progress, we still + * need to wait for the application to call commit_transaction() + * again to resume the call, and it just did. */ + rd_kafka_wrunlock(rk); + return RD_KAFKA_OP_RES_HANDLED; } /* If any messages failed delivery the transaction must be aborted. */ @@ -2294,11 +2382,12 @@ * (since it will not have any txn state). 
*/ rd_kafka_dbg(rk, EOS, "TXNCOMMIT", "No partitions registered: not sending EndTxn"); - rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED); - goto done; + rd_kafka_wrunlock(rk); + rd_kafka_txn_endtxn_complete(rk); + return RD_KAFKA_OP_RES_HANDLED; } - pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false); if (!rd_kafka_pid_valid(pid)) { rd_dassert(!*"BUG: No PID despite proper transaction state"); error = rd_kafka_error_new_retriable( @@ -2311,8 +2400,7 @@ err = rd_kafka_EndTxnRequest( rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, rd_true /* commit */, errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, - rd_kafka_q_keep(rko->rko_replyq.q)); + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL); if (err) { error = rd_kafka_error_new_retriable(err, "%s", errstr); goto done; @@ -2334,8 +2422,7 @@ "%s", rd_kafka_error_string(error)); - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } @@ -2358,22 +2445,22 @@ rd_kafka_wrlock(rk); - if ((error = rd_kafka_txn_require_state( - rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, - RD_KAFKA_TXN_STATE_BEGIN_COMMIT, - RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) - goto done; - - if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED) - goto done; - - rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT); + error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, + RD_KAFKA_TXN_STATE_BEGIN_COMMIT, + RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION, + RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED); + + if (!error && + rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION) { + /* Transition to BEGIN_COMMIT state if no error and commit not + * already started. */ + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT); + } - /* FALLTHRU */ -done: rd_kafka_wrunlock(rk); - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } @@ -2396,52 +2483,47 @@ rd_kafka_wrlock(rk); - if ((error = rd_kafka_txn_require_state( - rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) - goto done; - - rd_kafka_dbg(rk, EOS, "TXNCOMMIT", - "Committed transaction now acked by application"); - rd_kafka_txn_complete(rk, rd_true /*is commit*/); + if (!(error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) { + rd_kafka_dbg(rk, EOS, "TXNCOMMIT", + "Committed transaction now acked by application"); + rd_kafka_txn_complete(rk, rd_true /*is commit*/); + } - /* FALLTHRU */ -done: rd_kafka_wrunlock(rk); - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } + rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; rd_ts_t abs_timeout; - if ((error = rd_kafka_ensure_transactional(rk))) - return error; - - /* The commit is in two phases: + /* The commit is in three phases: * - begin commit: wait for outstanding messages to be produced, * disallow new messages from being produced * by application. * - commit: commit transaction. + * - commit not acked: commit done, but waiting for application + * to acknowledge by completing this API call. 
*/ - abs_timeout = rd_timeout_init(timeout_ms); + if ((error = rd_kafka_txn_curr_api_begin(rk, "commit_transaction", + rd_false /* no cap */, + timeout_ms, &abs_timeout))) + return error; /* Begin commit */ - error = rd_kafka_txn_curr_api_req( - rk, "commit_transaction (begin)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_begin_commit), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | - RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT); - if (error) - return error; + if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_commit, + abs_timeout))) + return rd_kafka_txn_curr_api_return(rk, + /* not resumable yet */ + rd_false, error); rd_kafka_dbg(rk, EOS, "TXNCOMMIT", "Flushing %d outstanding message(s) prior to commit", @@ -2458,7 +2540,7 @@ error = rd_kafka_error_new_retriable( err, "Failed to flush all outstanding messages " - "within the transaction timeout: " + "within the API timeout: " "%d message(s) remaining%s", rd_kafka_outq_len(rk), /* In case event queue delivery reports @@ -2477,35 +2559,32 @@ err, "Failed to flush outstanding messages: %s", rd_kafka_err2str(err)); - rd_kafka_txn_curr_api_reset(rk, rd_false); - - /* FIXME: What to do here? Add test case */ - - return error; + /* The commit operation is in progress in the background + * and the application will need to call this API again + * to resume. */ + return rd_kafka_txn_curr_api_return(rk, rd_true, error); } rd_kafka_dbg(rk, EOS, "TXNCOMMIT", "Transaction commit message flush complete"); /* Commit transaction */ - error = rd_kafka_txn_curr_api_req( - rk, "commit_transaction", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_commit_transaction), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_REUSE | RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | - RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT); + error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction, + abs_timeout); if (error) - return error; + return rd_kafka_txn_curr_api_return(rk, rd_true, error); /* Last call is to transition from COMMIT_NOT_ACKED to READY */ - return rd_kafka_txn_curr_api_req( - rk, "commit_transaction (ack)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_commit_transaction_ack), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_REUSE | - RD_KAFKA_TXN_CURR_API_F_ABORT_ON_TIMEOUT); + error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction_ack, + /* Timeout must be infinite since this is + * a synchronization point. + * The call is immediate though, so this + * will not block. 
*/ + RD_POLL_INFINITE); + + return rd_kafka_txn_curr_api_return(rk, + /* not resumable at this point */ + rd_false, error); } @@ -2526,21 +2605,23 @@ return RD_KAFKA_OP_RES_HANDLED; rd_kafka_wrlock(rk); - if ((error = rd_kafka_txn_require_state( - rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, - RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, - RD_KAFKA_TXN_STATE_ABORTABLE_ERROR, - RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) - goto done; - - if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED) - goto done; - rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION); - clear_pending = rd_true; + error = + rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION, + RD_KAFKA_TXN_STATE_BEGIN_ABORT, + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, + RD_KAFKA_TXN_STATE_ABORTABLE_ERROR, + RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); + + if (!error && + (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION || + rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR)) { + /* Transition to ABORTING_TRANSACTION state if no error and + * abort not already started. */ + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT); + clear_pending = rd_true; + } - /* FALLTHRU */ -done: rd_kafka_wrunlock(rk); if (clear_pending) { @@ -2549,8 +2630,7 @@ mtx_unlock(&rk->rk_eos.txn_pending_lock); } - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } @@ -2576,7 +2656,8 @@ rd_kafka_wrlock(rk); if ((error = rd_kafka_txn_require_state( - rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, + rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT, + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) goto done; @@ -2586,59 +2667,48 @@ * for the application to call abort_transaction() again * to synchronize state, and it just did. */ goto done; + } else if (rk->rk_eos.txn_state == + RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) { + /* A previous call to abort_transaction() timed out but + * the abort is still in progress, we still need to wait + * for the application to call abort_transaction() again + * to synchronize state, and it just did. */ + rd_kafka_wrunlock(rk); + return RD_KAFKA_OP_RES_HANDLED; } - if (rk->rk_eos.txn_requires_epoch_bump || - rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_ASSIGNED) { - /* If the underlying idempotent producer's state indicates it - * is re-acquiring its PID we need to wait for that to finish - * before allowing a new begin_transaction(), and since that is - * not a blocking call we need to perform that wait in this - * state instead. - * This may happen on epoch bump and fatal idempotent producer - * error which causes the current transaction to enter the - * abortable state. - * To recover we need to request an epoch bump from the - * transaction coordinator. This is handled automatically - * by the idempotent producer, so we just need to wait for - * the new pid to be assigned. 
- */ - - if (rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED) { - rd_kafka_dbg(rk, EOS, "TXNABORT", "PID already bumped"); - rd_kafka_txn_set_state( - rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); - goto done; - } + if (!rk->rk_eos.txn_req_cnt) { + rd_kafka_dbg(rk, EOS, "TXNABORT", + "No partitions registered: not sending EndTxn"); + rd_kafka_wrunlock(rk); + rd_kafka_txn_endtxn_complete(rk); + return RD_KAFKA_OP_RES_HANDLED; + } + /* If the underlying idempotent producer's state indicates it + * is re-acquiring its PID we need to wait for that to finish + * before allowing a new begin_transaction(), and since that is + * not a blocking call we need to perform that wait in this + * state instead. + * To recover we need to request an epoch bump from the + * transaction coordinator. This is handled automatically + * by the idempotent producer, so we just need to wait for + * the new pid to be assigned. + */ + if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_ASSIGNED && + rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT) { rd_kafka_dbg(rk, EOS, "TXNABORT", "Waiting for transaction coordinator " "PID bump to complete before aborting " "transaction (idempotent producer state %s)", rd_kafka_idemp_state2str(rk->rk_eos.idemp_state)); - /* Replace the current init replyq, if any, which is - * from a previous timed out abort_transaction() call. */ - RD_IF_FREE(rk->rk_eos.txn_init_rkq, rd_kafka_q_destroy); - - /* Grab a separate reference to use in state_change(), - * outside the curr_api to allow the curr_api to - * to timeout while the PID bump continues in the - * the background. */ - rk->rk_eos.txn_init_rkq = rd_kafka_q_keep(rko->rko_replyq.q); - rd_kafka_wrunlock(rk); - return RD_KAFKA_OP_RES_HANDLED; - } - if (!rk->rk_eos.txn_req_cnt) { - rd_kafka_dbg(rk, EOS, "TXNABORT", - "No partitions registered: not sending EndTxn"); - rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED); - goto done; + return RD_KAFKA_OP_RES_HANDLED; } - pid = rd_kafka_idemp_get_pid0(rk, rd_false /*dont-lock*/); + pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_true); if (!rd_kafka_pid_valid(pid)) { rd_dassert(!*"BUG: No PID despite proper transaction state"); error = rd_kafka_error_new_retriable( @@ -2651,13 +2721,14 @@ err = rd_kafka_EndTxnRequest( rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid, rd_false /* abort */, errstr, sizeof(errstr), - RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, - rd_kafka_q_keep(rko->rko_replyq.q)); + RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL); if (err) { error = rd_kafka_error_new_retriable(err, "%s", errstr); goto done; } + rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION); + rd_kafka_wrunlock(rk); return RD_KAFKA_OP_RES_HANDLED; @@ -2665,10 +2736,7 @@ done: rd_kafka_wrunlock(rk); - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); - - // FIXME: What state do we transition to? READY? FATAL? 
+ rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } @@ -2691,20 +2759,16 @@ rd_kafka_wrlock(rk); - if ((error = rd_kafka_txn_require_state( - rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) - goto done; - - rd_kafka_dbg(rk, EOS, "TXNABORT", - "Aborted transaction now acked by application"); - rd_kafka_txn_complete(rk, rd_false /*is abort*/); + if (!(error = rd_kafka_txn_require_state( + rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) { + rd_kafka_dbg(rk, EOS, "TXNABORT", + "Aborted transaction now acked by application"); + rd_kafka_txn_complete(rk, rd_false /*is abort*/); + } - /* FALLTHRU */ -done: rd_kafka_wrunlock(rk); - rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q), - error); + rd_kafka_txn_curr_api_set_result(rk, 0, error); return RD_KAFKA_OP_RES_HANDLED; } @@ -2714,30 +2778,25 @@ rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms) { rd_kafka_error_t *error; rd_kafka_resp_err_t err; - rd_ts_t abs_timeout = rd_timeout_init(timeout_ms); + rd_ts_t abs_timeout; - if ((error = rd_kafka_ensure_transactional(rk))) + if ((error = rd_kafka_txn_curr_api_begin(rk, "abort_transaction", + rd_false /* no cap */, + timeout_ms, &abs_timeout))) return error; /* The abort is multi-phase: - * - set state to ABORTING_TRANSACTION + * - set state to BEGIN_ABORT * - flush() outstanding messages * - send EndTxn - * - * The curr_api must be reused during all these steps to avoid - * a race condition where another application thread calls a - * txn API inbetween the steps. */ - error = rd_kafka_txn_curr_api_req( - rk, "abort_transaction (begin)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_begin_abort), - RD_POLL_INFINITE, /* begin_abort is immediate, no timeout */ - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | - RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT); - if (error) - return error; + /* Begin abort */ + if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_abort, + abs_timeout))) + return rd_kafka_txn_curr_api_return(rk, + /* not resumable yet */ + rd_false, error); rd_kafka_dbg(rk, EOS, "TXNABORT", "Purging and flushing %d outstanding message(s) prior " @@ -2757,7 +2816,7 @@ error = rd_kafka_error_new_retriable( err, "Failed to flush all outstanding messages " - "within the transaction timeout: " + "within the API timeout: " "%d message(s) remaining%s", rd_kafka_outq_len(rk), (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) @@ -2771,32 +2830,31 @@ err, "Failed to flush outstanding messages: %s", rd_kafka_err2str(err)); - rd_kafka_txn_curr_api_reset(rk, rd_false); - - /* FIXME: What to do here? */ - - return error; + /* The abort operation is in progress in the background + * and the application will need to call this API again + * to resume. */ + return rd_kafka_txn_curr_api_return(rk, rd_true, error); } rd_kafka_dbg(rk, EOS, "TXNCOMMIT", "Transaction abort message purge and flush complete"); - error = rd_kafka_txn_curr_api_req( - rk, "abort_transaction", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_abort_transaction), - rd_timeout_remains(abs_timeout), - RD_KAFKA_TXN_CURR_API_F_FOR_REUSE | RD_KAFKA_TXN_CURR_API_F_REUSE | - RD_KAFKA_TXN_CURR_API_F_RETRIABLE_ON_TIMEOUT); + error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction, + abs_timeout); if (error) - return error; + return rd_kafka_txn_curr_api_return(rk, rd_true, error); /* Last call is to transition from ABORT_NOT_ACKED to READY. 
*/ - return rd_kafka_txn_curr_api_req( - rk, "abort_transaction (ack)", - rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, - rd_kafka_txn_op_abort_transaction_ack), - rd_timeout_remains(abs_timeout), RD_KAFKA_TXN_CURR_API_F_REUSE); + error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction_ack, + /* Timeout must be infinite since this is + * a synchronization point. + * The call is immediate though, so this + * will not block. */ + RD_POLL_INFINITE); + + return rd_kafka_txn_curr_api_return(rk, + /* not resumable at this point */ + rd_false, error); } @@ -3122,9 +3180,12 @@ * @locks none */ void rd_kafka_txns_term(rd_kafka_t *rk) { - RD_IF_FREE(rk->rk_eos.txn_init_rkq, rd_kafka_q_destroy); RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free); + RD_IF_FREE(rk->rk_eos.txn_curr_api.error, rd_kafka_error_destroy); + + mtx_destroy(&rk->rk_eos.txn_curr_api.lock); + cnd_destroy(&rk->rk_eos.txn_curr_api.cnd); rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr, 1); rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr, @@ -3162,6 +3223,9 @@ TAILQ_INIT(&rk->rk_eos.txn_waitresp_rktps); TAILQ_INIT(&rk->rk_eos.txn_rktps); + mtx_init(&rk->rk_eos.txn_curr_api.lock, mtx_plain); + cnd_init(&rk->rk_eos.txn_curr_api.cnd); + /* Logical coordinator */ rk->rk_eos.txn_coord = rd_kafka_broker_add_logical(rk, "TxnCoordinator"); diff -Nru librdkafka-1.9.2/src/rdstring.c librdkafka-2.0.2/src/rdstring.c --- librdkafka-1.9.2/src/rdstring.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src/rdstring.c 2023-01-20 09:14:36.000000000 +0000 @@ -443,7 +443,7 @@ size_t i = 0; size_t elen = 0; - *cntp = '\0'; + *cntp = 0; /* First count the maximum number of fields so we know how large of * an array we need to allocate. Escapes are ignored. */ diff -Nru librdkafka-1.9.2/src-cpp/HandleImpl.cpp librdkafka-2.0.2/src-cpp/HandleImpl.cpp --- librdkafka-1.9.2/src-cpp/HandleImpl.cpp 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src-cpp/HandleImpl.cpp 2023-01-20 09:14:36.000000000 +0000 @@ -419,4 +419,4 @@ } } -}; // namespace RdKafka +} // namespace RdKafka diff -Nru librdkafka-1.9.2/src-cpp/MetadataImpl.cpp librdkafka-2.0.2/src-cpp/MetadataImpl.cpp --- librdkafka-1.9.2/src-cpp/MetadataImpl.cpp 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src-cpp/MetadataImpl.cpp 2023-01-20 09:14:36.000000000 +0000 @@ -30,10 +30,14 @@ using namespace RdKafka; -BrokerMetadata::~BrokerMetadata() {}; -PartitionMetadata::~PartitionMetadata() {}; -TopicMetadata::~TopicMetadata() {}; -Metadata::~Metadata() {}; +BrokerMetadata::~BrokerMetadata() { +} +PartitionMetadata::~PartitionMetadata() { +} +TopicMetadata::~TopicMetadata() { +} +Metadata::~Metadata() { +} /** @@ -49,7 +53,7 @@ return broker_metadata_->id; } - const std::string host() const { + std::string host() const { return host_; } int port() const { @@ -101,7 +105,8 @@ return &isrs_; } - ~PartitionMetadataImpl() {}; + ~PartitionMetadataImpl() { + } private: const rd_kafka_metadata_partition_t *partition_metadata_; @@ -126,7 +131,7 @@ delete partitions_[i]; } - const std::string topic() const { + std::string topic() const { return topic_; } const std::vector *partitions() const { diff -Nru librdkafka-1.9.2/src-cpp/rdkafkacpp.h librdkafka-2.0.2/src-cpp/rdkafkacpp.h --- librdkafka-1.9.2/src-cpp/rdkafkacpp.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src-cpp/rdkafkacpp.h 2023-01-20 09:14:36.000000000 +0000 @@ -111,7 +111,7 @@ * @remark This value should only be used during compile time, * for runtime checks of version use 
RdKafka::version() */ -#define RD_KAFKA_VERSION 0x010902ff +#define RD_KAFKA_VERSION 0x020002ff /** * @brief Returns the librdkafka version as integer. @@ -1326,6 +1326,14 @@ * * @remark CA certificate in PEM format may also be set with the * `ssl.ca.pem` configuration property. + * + * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is + * encoded using an obsolete cipher, it might be necessary to set up + * an OpenSSL configuration file to load the "legacy" provider and + * set the OPENSSL_CONF environment variable. + * See + * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more + * information. */ virtual Conf::ConfResult set_ssl_cert(RdKafka::CertificateType cert_type, RdKafka::CertificateEncoding cert_enc, @@ -1505,7 +1513,7 @@ } /** @returns the name of the handle */ - virtual const std::string name() const = 0; + virtual std::string name() const = 0; /** * @brief Returns the client's broker-assigned group member id @@ -1515,7 +1523,7 @@ * @returns Last assigned member id, or empty string if not currently * a group member. */ - virtual const std::string memberid() const = 0; + virtual std::string memberid() const = 0; /** @@ -1714,7 +1722,7 @@ * @returns Last cached ClusterId, or empty string if no ClusterId could be * retrieved in the allotted timespan. */ - virtual const std::string clusterid(int timeout_ms) = 0; + virtual std::string clusterid(int timeout_ms) = 0; /** * @brief Returns the underlying librdkafka C rd_kafka_t handle. @@ -1891,6 +1899,23 @@ * that explicitly mention using this function for freeing. */ virtual void mem_free(void *ptr) = 0; + + /** + * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by + * this Kafka client. + * + * This function sets or resets the SASL username and password credentials + * used by this Kafka client. The new credentials will be used the next time + * this client needs to authenticate to a broker. + * will not disconnect existing connections that might have been made using + * the old credentials. + * + * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms. + * + * @returns NULL on success or an error object on error. + */ + virtual Error *sasl_set_credentials(const std::string &username, + const std::string &password) = 0; }; @@ -1996,7 +2021,7 @@ /** @returns the topic name */ - virtual const std::string name() const = 0; + virtual std::string name() const = 0; /** * @returns true if \p partition is available for the topic (has leader). 
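A minimal usage sketch for the new C++ `sasl_set_credentials()` handle method declared in the hunk above: the broker address, security properties and credential strings below are illustrative assumptions for the example, and only the method itself and its `Error *` return convention come from the header change.

#include <iostream>
#include <string>
#include "rdkafkacpp.h"

int main() {
  std::string errstr;

  /* Global configuration; broker address and initial SASL settings are
   * placeholder values for this sketch. */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr);
  conf->set("security.protocol", "sasl_plaintext", errstr);
  conf->set("sasl.mechanism", "PLAIN", errstr);
  conf->set("sasl.username", "initial-user", errstr);
  conf->set("sasl.password", "initial-pass", errstr);

  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  delete conf;
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    return 1;
  }

  /* Reset the SASL credentials at runtime: the new username/password are
   * used for subsequent broker connections; connections already made with
   * the old credentials are not disconnected. */
  RdKafka::Error *error =
      producer->sasl_set_credentials("rotated-user", "rotated-pass");
  if (error) {
    std::cerr << "sasl_set_credentials failed: " << error->str() << std::endl;
    delete error;
  }

  delete producer;
  return 0;
}

The same credential-rotation pattern applies to consumer handles as well, since the method is declared on the shared Handle interface.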
@@ -3580,7 +3605,7 @@ virtual int32_t id() const = 0; /** @returns Broker hostname */ - virtual const std::string host() const = 0; + virtual std::string host() const = 0; /** @returns Broker listening port */ virtual int port() const = 0; @@ -3639,7 +3664,7 @@ typedef PartitionMetadataVector::const_iterator PartitionMetadataIterator; /** @returns Topic name */ - virtual const std::string topic() const = 0; + virtual std::string topic() const = 0; /** @returns Partition list */ virtual const PartitionMetadataVector *partitions() const = 0; @@ -3685,7 +3710,7 @@ virtual int32_t orig_broker_id() const = 0; /** @brief Broker (name) originating this metadata */ - virtual const std::string orig_broker_name() const = 0; + virtual std::string orig_broker_name() const = 0; virtual ~Metadata() = 0; }; diff -Nru librdkafka-1.9.2/src-cpp/rdkafkacpp_int.h librdkafka-2.0.2/src-cpp/rdkafkacpp_int.h --- librdkafka-1.9.2/src-cpp/rdkafkacpp_int.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/src-cpp/rdkafkacpp_int.h 2023-01-20 09:14:36.000000000 +0000 @@ -117,7 +117,7 @@ public: ~ErrorImpl() { rd_kafka_error_destroy(c_error_); - }; + } ErrorImpl(ErrorCode code, const std::string *errstr) { c_error_ = rd_kafka_error_new(static_cast(code), @@ -125,7 +125,8 @@ errstr ? errstr->c_str() : NULL); } - ErrorImpl(rd_kafka_error_t *c_error) : c_error_(c_error) {}; + ErrorImpl(rd_kafka_error_t *c_error) : c_error_(c_error) { + } static Error *create(ErrorCode code, const std::string *errstr) { return new ErrorImpl(code, errstr); @@ -161,7 +162,8 @@ class EventImpl : public Event { public: - ~EventImpl() {}; + ~EventImpl() { + } EventImpl(Type type, ErrorCode err, @@ -175,7 +177,8 @@ str_(str), id_(0), throttle_time_(0), - fatal_(false) {}; + fatal_(false) { + } EventImpl(Type type) : type_(type), @@ -185,7 +188,8 @@ str_(""), id_(0), throttle_time_(0), - fatal_(false) {}; + fatal_(false) { + } Type type() const { return type_; @@ -379,7 +383,7 @@ delete key_; if (headers_) delete headers_; - }; + } MessageImpl(rd_kafka_type_t rk_type, RdKafka::Topic *topic, @@ -495,7 +499,7 @@ void *msg_opaque() const { return rkmessage_->_private; - }; + } int64_t latency() const { return rd_kafka_message_latency(rkmessage_); @@ -990,12 +994,14 @@ class HandleImpl : virtual public Handle { public: - ~HandleImpl() {}; - HandleImpl() {}; - const std::string name() const { + ~HandleImpl() { + } + HandleImpl() { + } + std::string name() const { return std::string(rd_kafka_name(rk_)); - }; - const std::string memberid() const { + } + std::string memberid() const { char *str = rd_kafka_memberid(rk_); std::string memberid = str ? str : ""; if (str) @@ -1004,10 +1010,10 @@ } int poll(int timeout_ms) { return rd_kafka_poll(rk_, timeout_ms); - }; + } int outq_len() { return rd_kafka_outq_len(rk_); - }; + } void set_common_config(const RdKafka::ConfImpl *confimpl); @@ -1075,7 +1081,7 @@ rd_kafka_yield(rk_); } - const std::string clusterid(int timeout_ms) { + std::string clusterid(int timeout_ms) { char *str = rd_kafka_clusterid(rk_, timeout_ms); std::string clusterid = str ? 
str : ""; if (str) @@ -1127,7 +1133,7 @@ ErrorCode oauthbearer_set_token_failure(const std::string &errstr) { return static_cast( rd_kafka_oauthbearer_set_token_failure(rk_, errstr.c_str())); - }; + } Error *sasl_background_callbacks_enable() { rd_kafka_error_t *c_error = rd_kafka_sasl_background_callbacks_enable(rk_); @@ -1138,13 +1144,24 @@ return NULL; } + Error *sasl_set_credentials(const std::string &username, + const std::string &password) { + rd_kafka_error_t *c_error = + rd_kafka_sasl_set_credentials(rk_, username.c_str(), password.c_str()); + + if (c_error) + return new ErrorImpl(c_error); + + return NULL; + }; + void *mem_malloc(size_t size) { return rd_kafka_mem_malloc(rk_, size); - }; + } void mem_free(void *ptr) { rd_kafka_mem_free(rk_, ptr); - }; + } rd_kafka_t *rk_; /* All Producer and Consumer callbacks must reside in HandleImpl and @@ -1171,7 +1188,7 @@ rd_kafka_topic_destroy(rkt_); } - const std::string name() const { + std::string name() const { return rd_kafka_topic_name(rkt_); } @@ -1201,7 +1218,8 @@ */ class TopicPartitionImpl : public TopicPartition { public: - ~TopicPartitionImpl() {}; + ~TopicPartitionImpl() { + } static TopicPartition *create(const std::string &topic, int partition); @@ -1371,7 +1389,7 @@ bool closed() { return rd_kafka_consumer_closed(rk_) ? true : false; - }; + } ErrorCode seek(const TopicPartition &partition, int timeout_ms); @@ -1398,7 +1416,7 @@ return &topics_; } - const std::string orig_broker_name() const { + std::string orig_broker_name() const { return std::string(metadata_->orig_broker_name); } @@ -1420,7 +1438,7 @@ ~ConsumerImpl() { if (rk_) rd_kafka_destroy(rk_); - }; + } static Consumer *create(Conf *conf, std::string &errstr); ErrorCode start(Topic *topic, int32_t partition, int64_t offset); @@ -1453,7 +1471,7 @@ ~ProducerImpl() { if (rk_) rd_kafka_destroy(rk_); - }; + } ErrorCode produce(Topic *topic, int32_t partition, diff -Nru librdkafka-1.9.2/tests/0004-conf.c librdkafka-2.0.2/tests/0004-conf.c --- librdkafka-1.9.2/tests/0004-conf.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0004-conf.c 2023-01-20 09:14:36.000000000 +0000 @@ -716,6 +716,7 @@ "Expected rd_kafka_new() to fail with " "invalid ssl.ca.location"); TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr); + rd_kafka_conf_destroy(conf); } #ifdef _WIN32 diff -Nru librdkafka-1.9.2/tests/0026-consume_pause.c librdkafka-2.0.2/tests/0026-consume_pause.c --- librdkafka-1.9.2/tests/0026-consume_pause.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0026-consume_pause.c 2023-01-20 09:14:36.000000000 +0000 @@ -485,6 +485,8 @@ TEST_SAY("Assigning partition\n"); TEST_CALL_ERR__(rd_kafka_assign(rk, parts)); + rd_kafka_topic_partition_list_destroy(parts); + TEST_SAY("Consuming messages 0..100\n"); test_msgver_init(&mv, testid); diff -Nru librdkafka-1.9.2/tests/0052-msg_timestamps.c librdkafka-2.0.2/tests/0052-msg_timestamps.c --- librdkafka-1.9.2/tests/0052-msg_timestamps.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0052-msg_timestamps.c 2023-01-20 09:14:36.000000000 +0000 @@ -184,6 +184,11 @@ if (!test_can_create_topics(1)) return 0; + if (test_needs_auth()) { + TEST_SKIP("Test cluster requires authentication/SSL\n"); + return 0; + } + /* Broker version limits the producer's feature set, * for 0.9.0.0 no timestamp will be transmitted, * but for 0.10.1.0 (or newer, api.version.request will be true) diff -Nru librdkafka-1.9.2/tests/0077-compaction.c librdkafka-2.0.2/tests/0077-compaction.c --- 
librdkafka-1.9.2/tests/0077-compaction.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0077-compaction.c 2023-01-20 09:14:36.000000000 +0000 @@ -328,6 +328,11 @@ if (!test_can_create_topics(1)) return 0; + if (test_needs_auth()) { + TEST_SKIP("Test cluster requires authentication/SSL\n"); + return 0; + } + do_test_compaction(10, NULL); if (test_quick) { diff -Nru librdkafka-1.9.2/tests/0080-admin_ut.c librdkafka-2.0.2/tests/0080-admin_ut.c --- librdkafka-1.9.2/tests/0080-admin_ut.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0080-admin_ut.c 2023-01-20 09:14:36.000000000 +0000 @@ -497,6 +497,252 @@ SUB_TEST_QUICK(); } +/** + * @brief ListConsumerGroups tests + * + * + * + */ +static void do_test_ListConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_ListConsumerGroups_result_t *res; + const rd_kafka_error_t **errors; + size_t errors_cnt, valid_cnt; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s ListConsumerGroups with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (with_options) { + rd_kafka_consumer_group_state_t duplicate[2] = { + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY, + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY}; + + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + + /* Test duplicate error on match states */ + rd_kafka_error_t *error = + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, duplicate, 2); + TEST_ASSERT(error && rd_kafka_error_code(error), "%s", + "Expected error on duplicate states," + " got no error"); + rd_kafka_error_destroy(error); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr))); + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "ListConsumerGroups"); + TEST_SAY("Call ListConsumerGroups, timeout is %dms\n", exp_timeout); + rd_kafka_ListConsumerGroups(rk, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (destroy) + goto destroy; + + /* Poll result queue */ + TIMING_START(&timing, "ListConsumerGroups.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("ListConsumerGroups: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_ListConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected ListConsumerGroups_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting no error here, the real error will be in the error array */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT( + err == RD_KAFKA_RESP_ERR_NO_ERROR, + "expected ListConsumerGroups to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err), + err ? 
errstr2 : "n/a"); + + errors = rd_kafka_ListConsumerGroups_result_errors(rkev, &errors_cnt); + TEST_ASSERT(errors_cnt == 1, "expected one error, got %" PRIu64, + errors_cnt); + rd_kafka_ListConsumerGroups_result_valid(rkev, &valid_cnt); + TEST_ASSERT(valid_cnt == 0, "expected zero valid groups, got %" PRIu64, + valid_cnt); + + err = rd_kafka_error_code(errors[0]); + errstr2 = rd_kafka_error_string(errors[0]); + TEST_ASSERT( + err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected ListConsumerGroups to return error %s, not %s (%s)", + rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT), + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + rd_kafka_event_destroy(rkev); + +destroy: + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + SUB_TEST_PASS(); +} + +/** + * @brief DescribeConsumerGroups tests + * + * + * + */ +static void do_test_DescribeConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t destroy) { + rd_kafka_queue_t *q; +#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4 + const char *group_names[TEST_DESCRIBE_CONSUMER_GROUPS_CNT]; + rd_kafka_AdminOptions_t *options = NULL; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + const char *errstr2; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + const rd_kafka_DeleteGroups_result_t *res; + const rd_kafka_ConsumerGroupDescription_t **resgroups; + size_t resgroup_cnt; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + if (useq) { + my_opaque = (void *)456; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TIMING_START(&timing, "DescribeConsumerGroups"); + TEST_SAY("Call DescribeConsumerGroups, timeout is %dms\n", exp_timeout); + rd_kafka_DescribeConsumerGroups( + rk, group_names, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + if (destroy) + goto destroy; + + /* Poll result queue */ + TIMING_START(&timing, "DescribeConsumerGroups.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("DescribeConsumerGroups: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_DescribeConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected DescribeConsumerGroups_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting no error (errors will be per-group) */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT( + err == RD_KAFKA_RESP_ERR_NO_ERROR, + "expected DescribeConsumerGroups to return error %s, not %s (%s)", + 
rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + /* Extract groups, should return TEST_DESCRIBE_GROUPS_CNT groups. */ + resgroups = + rd_kafka_DescribeConsumerGroups_result_groups(res, &resgroup_cnt); + TEST_ASSERT(resgroups && + resgroup_cnt == TEST_DESCRIBE_CONSUMER_GROUPS_CNT, + "expected %d result_groups, got %p cnt %" PRIusz, + TEST_DESCRIBE_CONSUMER_GROUPS_CNT, resgroups, resgroup_cnt); + + /* The returned groups should be in the original order, and + * should all have timed out. */ + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + TEST_ASSERT( + !strcmp(group_names[i], + rd_kafka_ConsumerGroupDescription_group_id( + resgroups[i])), + "expected group '%s' at position %d, not '%s'", + group_names[i], i, + rd_kafka_ConsumerGroupDescription_group_id(resgroups[i])); + TEST_ASSERT( + rd_kafka_error_code(rd_kafka_ConsumerGroupDescription_error( + resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected group '%s' to have timed out, got %s", + group_names[i], + rd_kafka_error_string( + rd_kafka_ConsumerGroupDescription_error(resgroups[i]))); + } + + rd_kafka_event_destroy(rkev); + +destroy: + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + rd_free((char *)group_names[i]); + } + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); +#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT + + SUB_TEST_PASS(); +} + static void do_test_DeleteRecords(const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, @@ -689,7 +935,7 @@ rd_kafka_DeleteConsumerGroupOffsets_destroy_array(cgoffsets, MY_DEL_CGRPOFFS_CNT); -#undef MY_DEL_CGRPOFFSETS_CNT +#undef MY_DEL_CGRPOFFS_CNT SUB_TEST_PASS(); } @@ -1309,6 +1555,438 @@ } +static void do_test_AlterConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options) { + rd_kafka_queue_t *q; +#define MY_ALTER_CGRPOFFS_CNT 1 + rd_kafka_AdminOptions_t *options = NULL; + const rd_kafka_AlterConsumerGroupOffsets_result_t *res; + rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets[MY_ALTER_CGRPOFFS_CNT]; + rd_kafka_AlterConsumerGroupOffsets_t + *cgoffsets_empty[MY_ALTER_CGRPOFFS_CNT]; + rd_kafka_AlterConsumerGroupOffsets_t + *cgoffsets_negative[MY_ALTER_CGRPOFFS_CNT]; + rd_kafka_AlterConsumerGroupOffsets_t + *cgoffsets_duplicate[MY_ALTER_CGRPOFFS_CNT]; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + void *my_opaque = NULL, *opaque; + + SUB_TEST_QUICK("%s AlterConsumerGroupOffsets with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_ALTER_CGRPOFFS_CNT; i++) { + /* Call with three correct topic partitions. */ + rd_kafka_topic_partition_list_t *partitions = + rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9) + ->offset = 9; + rd_kafka_topic_partition_list_add(partitions, "topic3", 15) + ->offset = 15; + rd_kafka_topic_partition_list_add(partitions, "topic1", 1) + ->offset = 1; + cgoffsets[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions); + rd_kafka_topic_partition_list_destroy(partitions); + + /* Call with empty topic-partition list. 
*/ + rd_kafka_topic_partition_list_t *partitions_empty = + rd_kafka_topic_partition_list_new(0); + cgoffsets_empty[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions_empty); + rd_kafka_topic_partition_list_destroy(partitions_empty); + + /* Call with a topic-partition having negative offset. */ + rd_kafka_topic_partition_list_t *partitions_negative = + rd_kafka_topic_partition_list_new(4); + rd_kafka_topic_partition_list_add(partitions_negative, "topic1", + 9) + ->offset = 9; + rd_kafka_topic_partition_list_add(partitions_negative, "topic3", + 15) + ->offset = 15; + rd_kafka_topic_partition_list_add(partitions_negative, "topic1", + 1) + ->offset = 1; + rd_kafka_topic_partition_list_add(partitions_negative, "topic1", + 2) + ->offset = -3; + cgoffsets_negative[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions_negative); + rd_kafka_topic_partition_list_destroy(partitions_negative); + + /* Call with duplicate partitions. */ + rd_kafka_topic_partition_list_t *partitions_duplicate = + rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions_duplicate, + "topic1", 9) + ->offset = 9; + rd_kafka_topic_partition_list_add(partitions_duplicate, + "topic3", 15) + ->offset = 15; + rd_kafka_topic_partition_list_add(partitions_duplicate, + "topic1", 9) + ->offset = 1; + + cgoffsets_duplicate[i] = rd_kafka_AlterConsumerGroupOffsets_new( + "mygroup", partitions_duplicate); + rd_kafka_topic_partition_list_destroy(partitions_duplicate); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + if (useq) { + my_opaque = (void *)99981; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + /* Empty topic-partition list */ + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_empty, + MY_ALTER_CGRPOFFS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_empty, + MY_ALTER_CGRPOFFS_CNT); + + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, 0, 10); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr_empty = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT(strcmp(event_errstr_empty, + "Non-empty topic partition list must be present") == + 0, + "expected \"Non-empty topic partition list must be " + "present\", not \"%s\"", + event_errstr_empty); + rd_kafka_event_destroy(rkev); + + /* Negative topic-partition offset */ + TIMING_START(&timing, 
"AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_negative, + MY_ALTER_CGRPOFFS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_negative, + MY_ALTER_CGRPOFFS_CNT); + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, 0, 10); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr_negative = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT( + strcmp(event_errstr_negative, + "All topic-partition offsets must be >= 0") == 0, + "expected \"All topic-partition offsets must be >= 0\", not \"%s\"", + event_errstr_negative); + rd_kafka_event_destroy(rkev); + + /* Duplicate topic-partition offset */ + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_duplicate, + MY_ALTER_CGRPOFFS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_duplicate, + MY_ALTER_CGRPOFFS_CNT); + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, 0, 10); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr_duplicate = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, + "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT(strcmp(event_errstr_duplicate, + "Duplicate partitions not allowed") == 0, + "expected \"Duplicate partitions not allowed\", not \"%s\"", + event_errstr_duplicate); + rd_kafka_event_destroy(rkev); + + /* Correct topic-partition list, local timeout */ + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets, MY_ALTER_CGRPOFFS_CNT, + options, q); + TIMING_ASSERT_LATER(&timing, 0, 10); + /* Poll result queue */ + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); + 
TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + /* Expecting error */ + err = rd_kafka_event_error(rkev); + const char *event_errstr = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail"); + TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT, + "expected RD_KAFKA_RESP_ERR__TIMED_OUT, not %s", + rd_kafka_err2name(err)); + TEST_ASSERT(strcmp(event_errstr, + "Failed while waiting for response from broker: " + "Local: Timed out") == 0, + "expected \"Failed while waiting for response from broker: " + "Local: Timed out\", not \"%s\"", + event_errstr); + rd_kafka_event_destroy(rkev); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets, + MY_ALTER_CGRPOFFS_CNT); + +#undef MY_ALTER_CGRPOFFS_CNT + + SUB_TEST_PASS(); +} + + +static void do_test_ListConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int with_options, + rd_bool_t null_toppars) { + rd_kafka_queue_t *q; +#define MY_LIST_CGRPOFFS_CNT 1 + rd_kafka_AdminOptions_t *options = NULL; + const rd_kafka_ListConsumerGroupOffsets_result_t *res; + rd_kafka_ListConsumerGroupOffsets_t *cgoffsets[MY_LIST_CGRPOFFS_CNT]; + rd_kafka_ListConsumerGroupOffsets_t + *cgoffsets_empty[MY_LIST_CGRPOFFS_CNT]; + rd_kafka_ListConsumerGroupOffsets_t + *cgoffsets_duplicate[MY_LIST_CGRPOFFS_CNT]; + int exp_timeout = MY_SOCKET_TIMEOUT_MS; + int i; + char errstr[512]; + rd_kafka_resp_err_t err; + test_timing_t timing; + rd_kafka_event_t *rkev; + void *my_opaque = NULL, *opaque; + const char *errstr_ptr; + + SUB_TEST_QUICK("%s ListConsumerGroupOffsets with %s, timeout %dms", + rd_kafka_name(rk), what, exp_timeout); + + q = useq ? 
useq : rd_kafka_queue_new(rk); + + for (i = 0; i < MY_LIST_CGRPOFFS_CNT; i++) { + rd_kafka_topic_partition_list_t *partitions = + rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9); + rd_kafka_topic_partition_list_add(partitions, "topic3", 15); + rd_kafka_topic_partition_list_add(partitions, "topic1", 1); + if (null_toppars) { + cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", NULL); + } else { + cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", partitions); + } + rd_kafka_topic_partition_list_destroy(partitions); + + rd_kafka_topic_partition_list_t *partitions_empty = + rd_kafka_topic_partition_list_new(0); + cgoffsets_empty[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", partitions_empty); + rd_kafka_topic_partition_list_destroy(partitions_empty); + + partitions = rd_kafka_topic_partition_list_new(3); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9); + rd_kafka_topic_partition_list_add(partitions, "topic3", 15); + rd_kafka_topic_partition_list_add(partitions, "topic1", 9); + cgoffsets_duplicate[i] = rd_kafka_ListConsumerGroupOffsets_new( + "mygroup", partitions); + rd_kafka_topic_partition_list_destroy(partitions); + } + + if (with_options) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); + + exp_timeout = MY_SOCKET_TIMEOUT_MS * 2; + + err = rd_kafka_AdminOptions_set_request_timeout( + options, exp_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + + if (useq) { + my_opaque = (void *)99981; + rd_kafka_AdminOptions_set_opaque(options, my_opaque); + } + } + + TEST_SAY( + "Call ListConsumerGroupOffsets with empty topic-partition list.\n"); + rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_empty, + MY_LIST_CGRPOFFS_CNT, options, q); + rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_empty, + MY_LIST_CGRPOFFS_CNT); + /* Poll result queue */ + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TEST_SAY("ListConsumerGroupOffsets: got %s\n", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail"); + + errstr_ptr = rd_kafka_event_error_string(rkev); + TEST_ASSERT( + !strcmp(errstr_ptr, + "NULL or non-empty topic partition list must be passed"), + "expected error string \"NULL or non-empty topic partition list " + "must be passed\", not %s", + errstr_ptr); + + rd_kafka_event_destroy(rkev); + + + TEST_SAY( + "Call ListConsumerGroupOffsets with topic-partition list" + "containing duplicates.\n"); + rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_duplicate, 1, options, + q); + rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_duplicate, + MY_LIST_CGRPOFFS_CNT); + /* Poll result queue */ + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TEST_SAY("ListConsumerGroupOffsets: got %s\n", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail"); + + errstr_ptr = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!strcmp(errstr_ptr, "Duplicate partitions not allowed"), + "expected error string \"Duplicate partitions not allowed\"" + ", not %s", + errstr_ptr); + + rd_kafka_event_destroy(rkev); + + + TIMING_START(&timing, "ListConsumerGroupOffsets"); + TEST_SAY("Call ListConsumerGroupOffsets, timeout is %dms\n", + exp_timeout); + rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets, MY_LIST_CGRPOFFS_CNT, + options, q); 
+ rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets, + MY_LIST_CGRPOFFS_CNT); + TIMING_ASSERT_LATER(&timing, 0, 10); + + /* Poll result queue */ + TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll"); + rkev = rd_kafka_queue_poll(q, exp_timeout + 1000); + TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100); + TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout); + TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fs\n", + rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f); + + /* Convert event to proper result */ + res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + + opaque = rd_kafka_event_opaque(rkev); + TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p", + my_opaque, opaque); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail"); + + errstr_ptr = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!strcmp(errstr_ptr, + "Failed while waiting for response from broker: " + "Local: Timed out"), + "expected error string \"Failed while waiting for response " + "from broker: Local: Timed out\", not %s", + errstr_ptr); + + rd_kafka_event_destroy(rkev); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + +#undef MY_LIST_CGRPOFFS_CNT + + SUB_TEST_PASS(); +} + /** * @brief Test a mix of APIs using the same replyq. @@ -1557,17 +2235,21 @@ RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \ RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \ RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \ - RD_KAFKA_ADMIN_OP_DELETEGROUPS, \ RD_KAFKA_ADMIN_OP_DELETERECORDS, \ - RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \ RD_KAFKA_ADMIN_OP_CREATEACLS, \ RD_KAFKA_ADMIN_OP_DESCRIBEACLS, \ RD_KAFKA_ADMIN_OP_DELETEACLS, \ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, \ + RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, \ + RD_KAFKA_ADMIN_OP_DELETEGROUPS, \ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, \ + RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, \ + RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \ RD_KAFKA_ADMIN_OP_ANY /* Must be last */ \ } struct { const char *setter; - const rd_kafka_admin_op_t valid_apis[12]; + const rd_kafka_admin_op_t valid_apis[16]; } matrix[] = { {"request_timeout", _all_apis}, {"operation_timeout", @@ -1579,11 +2261,17 @@ RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, RD_KAFKA_ADMIN_OP_ALTERCONFIGS}}, {"broker", _all_apis}, + {"require_stable_offsets", + {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS}}, + {"match_consumer_group_states", + {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS}}, {"opaque", _all_apis}, {NULL}, }; int i; rd_kafka_AdminOptions_t *options; + rd_kafka_consumer_group_state_t state[1] = { + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE}; SUB_TEST_QUICK(); @@ -1595,6 +2283,7 @@ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR; rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_error_t *error = NULL; char errstr[512]; int fi; @@ -1615,6 +2304,16 @@ else if (!strcmp(matrix[i].setter, "broker")) err = rd_kafka_AdminOptions_set_broker( options, 5, errstr, sizeof(errstr)); + else if (!strcmp(matrix[i].setter, + "require_stable_offsets")) + error = + rd_kafka_AdminOptions_set_require_stable_offsets( + options, 0); + else if (!strcmp(matrix[i].setter, + "match_consumer_group_states")) + error = + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, state, 1); else if (!strcmp(matrix[i].setter, "opaque")) { 
rd_kafka_AdminOptions_set_opaque( options, (void *)options); @@ -1623,6 +2322,13 @@ TEST_FAIL("Invalid setter: %s", matrix[i].setter); + if (error) { + err = rd_kafka_error_code(error); + snprintf(errstr, sizeof(errstr), "%s", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); + } + TEST_SAYL(3, "AdminOptions_set_%s on " @@ -1725,6 +2431,19 @@ do_test_DeleteTopics("temp queue, options", rk, NULL, 1); do_test_DeleteTopics("main queue, options", rk, mainq, 1); + do_test_ListConsumerGroups("temp queue, no options", rk, NULL, 0, + rd_false); + do_test_ListConsumerGroups("temp queue, options", rk, NULL, 1, + rd_false); + do_test_ListConsumerGroups("main queue", rk, mainq, 0, rd_false); + + do_test_DescribeConsumerGroups("temp queue, no options", rk, NULL, 0, + rd_false); + do_test_DescribeConsumerGroups("temp queue, options", rk, NULL, 1, + rd_false); + do_test_DescribeConsumerGroups("main queue, options", rk, mainq, 1, + rd_false); + do_test_DeleteGroups("temp queue, no options", rk, NULL, 0, rd_false); do_test_DeleteGroups("temp queue, options", rk, NULL, 1, rd_false); do_test_DeleteGroups("main queue, options", rk, mainq, 1, rd_false); @@ -1758,6 +2477,24 @@ do_test_DeleteAcls("temp queue, options", rk, NULL, rd_false, rd_true); do_test_DeleteAcls("main queue, options", rk, mainq, rd_false, rd_true); + do_test_AlterConsumerGroupOffsets("temp queue, no options", rk, NULL, + 0); + do_test_AlterConsumerGroupOffsets("temp queue, options", rk, NULL, 1); + do_test_AlterConsumerGroupOffsets("main queue, options", rk, mainq, 1); + + do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0, + rd_false); + do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1, + rd_false); + do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1, + rd_false); + do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0, + rd_true); + do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1, + rd_true); + do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1, + rd_true); + do_test_mix(rk, mainq); do_test_configs(rk, mainq); diff -Nru librdkafka-1.9.2/tests/0081-admin.c librdkafka-2.0.2/tests/0081-admin.c --- librdkafka-1.9.2/tests/0081-admin.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0081-admin.c 2023-01-20 09:14:36.000000000 +0000 @@ -28,6 +28,7 @@ #include "test.h" #include "rdkafka.h" +#include "../src/rdstring.h" /** * @brief Admin API integration tests. @@ -2233,7 +2234,7 @@ static void do_test_DeleteGroups(const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout) { + int request_timeout) { rd_kafka_queue_t *q; rd_kafka_AdminOptions_t *options = NULL; rd_kafka_event_t *rkev = NULL; @@ -2255,16 +2256,16 @@ rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT]; const rd_kafka_DeleteGroups_result_t *res; - SUB_TEST_QUICK("%s DeleteGroups with %s, op_timeout %d", - rd_kafka_name(rk), what, op_timeout); + SUB_TEST_QUICK("%s DeleteGroups with %s, request_timeout %d", + rd_kafka_name(rk), what, request_timeout); q = useq ? 
useq : rd_kafka_queue_new(rk); - if (op_timeout != -1) { + if (request_timeout != -1) { options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); - err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } @@ -2385,6 +2386,468 @@ SUB_TEST_PASS(); } +/** + * @brief Test list groups, creating consumers for a set of groups, + * listing and deleting them at the end. + */ +static void do_test_ListConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int request_timeout, + rd_bool_t match_states) { +#define TEST_LIST_CONSUMER_GROUPS_CNT 4 + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + size_t valid_cnt, error_cnt; + rd_bool_t is_simple_consumer_group; + rd_kafka_consumer_group_state_t state; + char errstr[512]; + const char *errstr2, *group_id; + char *list_consumer_groups[TEST_LIST_CONSUMER_GROUPS_CNT]; + const int partitions_cnt = 1; + const int msgs_cnt = 100; + size_t i, found; + char *topic; + rd_kafka_metadata_topic_t exp_mdtopic = {0}; + int64_t testid = test_id_generate(); + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + const rd_kafka_ListConsumerGroups_result_t *res; + const rd_kafka_ConsumerGroupListing_t **groups; + + SUB_TEST_QUICK( + "%s ListConsumerGroups with %s, request_timeout %d" + ", match_states %s", + rd_kafka_name(rk), what, request_timeout, RD_STR_ToF(match_states)); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (request_timeout != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + + if (match_states) { + rd_kafka_consumer_group_state_t empty = + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY; + + TEST_CALL_ERROR__( + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, &empty, 1)); + } + + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr))); + } + + + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + exp_mdtopic.topic = topic; + + /* Create the topics first. */ + test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000); + + /* Produce 100 msgs */ + test_produce_msgs_easy(topic, testid, 0, msgs_cnt); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) { + char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + test_consume_msgs_easy(group, topic, testid, -1, msgs_cnt, + NULL); + list_consumer_groups[i] = group; + } + + TIMING_START(&timing, "ListConsumerGroups"); + TEST_SAY("Call ListConsumerGroups\n"); + rd_kafka_ListConsumerGroups(rk, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + TIMING_START(&timing, "ListConsumerGroups.queue_poll"); + + /* Poll result queue for ListConsumerGroups result. + * Print but otherwise ignore other event types + * (typically generic Error events). 
*/ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); + TEST_SAY("ListConsumerGroups: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) { + break; + } + + rd_kafka_event_destroy(rkev); + } + /* Convert event to proper result */ + res = rd_kafka_event_ListConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected ListConsumerGroups_result, got %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == exp_err, + "expected ListConsumerGroups to return %s, got %s (%s)", + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + TEST_SAY("ListConsumerGroups: returned %s (%s)\n", + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + groups = rd_kafka_ListConsumerGroups_result_valid(res, &valid_cnt); + rd_kafka_ListConsumerGroups_result_errors(res, &error_cnt); + + /* Other tests could be running */ + TEST_ASSERT(valid_cnt >= TEST_LIST_CONSUMER_GROUPS_CNT, + "expected ListConsumerGroups to return at least %" PRId32 + " valid groups," + " got %zu", + TEST_LIST_CONSUMER_GROUPS_CNT, valid_cnt); + + TEST_ASSERT(error_cnt == 0, + "expected ListConsumerGroups to return 0 errors," + " got %zu", + error_cnt); + + found = 0; + for (i = 0; i < valid_cnt; i++) { + int j; + const rd_kafka_ConsumerGroupListing_t *group; + group = groups[i]; + group_id = rd_kafka_ConsumerGroupListing_group_id(group); + is_simple_consumer_group = + rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + group); + state = rd_kafka_ConsumerGroupListing_state(group); + for (j = 0; j < TEST_LIST_CONSUMER_GROUPS_CNT; j++) { + if (!strcmp(list_consumer_groups[j], group_id)) { + found++; + TEST_ASSERT(!is_simple_consumer_group, + "expected a normal group," + " got a simple group"); + + TEST_ASSERT( + state == + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY, + "expected an Empty state," + " got state %s", + rd_kafka_consumer_group_state_name(state)); + break; + } + } + } + TEST_ASSERT(found == TEST_LIST_CONSUMER_GROUPS_CNT, + "expected to find %d" + " started groups," + " got %" PRIusz, + TEST_LIST_CONSUMER_GROUPS_CNT, found); + + rd_kafka_event_destroy(rkev); + + test_DeleteGroups_simple(rk, NULL, (char **)list_consumer_groups, + TEST_LIST_CONSUMER_GROUPS_CNT, NULL); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) { + rd_free(list_consumer_groups[i]); + } + + rd_free(topic); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef TEST_LIST_CONSUMER_GROUPS_CNT + + SUB_TEST_PASS(); +} + +typedef struct expected_DescribeConsumerGroups_result { + char *group_id; + rd_kafka_resp_err_t err; +} expected_DescribeConsumerGroups_result_t; + + +/** + * @brief Test describe groups, creating consumers for a set of groups, + * describing and deleting them at the end. 
+ */ +static void do_test_DescribeConsumerGroups(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int request_timeout) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4 + int known_groups = TEST_DESCRIBE_CONSUMER_GROUPS_CNT - 1; + int i; + const int partitions_cnt = 1; + const int msgs_cnt = 100; + char *topic; + rd_kafka_metadata_topic_t exp_mdtopic = {0}; + int64_t testid = test_id_generate(); + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + const rd_kafka_ConsumerGroupDescription_t **results = NULL; + expected_DescribeConsumerGroups_result_t + expected[TEST_DESCRIBE_CONSUMER_GROUPS_CNT] = RD_ZERO_INIT; + const char *describe_groups[TEST_DESCRIBE_CONSUMER_GROUPS_CNT]; + char group_instance_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512]; + char client_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512]; + rd_kafka_t *rks[TEST_DESCRIBE_CONSUMER_GROUPS_CNT]; + const rd_kafka_DescribeConsumerGroups_result_t *res; + + SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, request_timeout %d", + rd_kafka_name(rk), what, request_timeout); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (request_timeout != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + err = rd_kafka_AdminOptions_set_request_timeout( + options, request_timeout, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + exp_mdtopic.topic = topic; + + /* Create the topics first. */ + test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000); + + /* Produce 100 msgs */ + test_produce_msgs_easy(topic, testid, 0, msgs_cnt); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + rd_kafka_conf_t *conf; + char *group_id = rd_strdup(test_mk_topic_name(__FUNCTION__, 1)); + if (i < known_groups) { + snprintf(group_instance_ids[i], + sizeof(group_instance_ids[i]), + "group_instance_id_%" PRId32, i); + snprintf(client_ids[i], sizeof(client_ids[i]), + "client_id_%" PRId32, i); + + test_conf_init(&conf, NULL, 0); + test_conf_set(conf, "client.id", client_ids[i]); + test_conf_set(conf, "group.instance.id", + group_instance_ids[i]); + test_conf_set(conf, "session.timeout.ms", "5000"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + rks[i] = + test_create_consumer(group_id, NULL, conf, NULL); + test_consumer_subscribe(rks[i], topic); + /* Consume messages */ + test_consumer_poll("consumer", rks[i], testid, -1, -1, + msgs_cnt, NULL); + } + expected[i].group_id = group_id; + expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR; + describe_groups[i] = group_id; + } + + TIMING_START(&timing, "DescribeConsumerGroups"); + TEST_SAY("Call DescribeConsumerGroups\n"); + rd_kafka_DescribeConsumerGroups( + rk, describe_groups, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + TIMING_START(&timing, "DescribeConsumerGroups.queue_poll"); + + /* Poll result queue for DescribeConsumerGroups result. + * Print but otherwise ignore other event types + * (typically generic Error events). 
*/ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000)); + TEST_SAY("DescribeConsumerGroups: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) { + break; + } + + rd_kafka_event_destroy(rkev); + } + /* Convert event to proper result */ + res = rd_kafka_event_DescribeConsumerGroups_result(rkev); + TEST_ASSERT(res, "expected DescribeConsumerGroups_result, got %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(err == exp_err, + "expected DescribeConsumerGroups to return %s, got %s (%s)", + rd_kafka_err2str(exp_err), rd_kafka_err2str(err), + err ? errstr2 : "n/a"); + + TEST_SAY("DescribeConsumerGroups: returned %s (%s)\n", + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + size_t cnt = 0; + results = rd_kafka_DescribeConsumerGroups_result_groups(res, &cnt); + + TEST_ASSERT( + TEST_DESCRIBE_CONSUMER_GROUPS_CNT == cnt, + "expected DescribeConsumerGroups_result_groups to return %d items, " + "got %" PRIusz, + TEST_DESCRIBE_CONSUMER_GROUPS_CNT, cnt); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + expected_DescribeConsumerGroups_result_t *exp = &expected[i]; + rd_kafka_resp_err_t exp_err = exp->err; + const rd_kafka_ConsumerGroupDescription_t *act = results[i]; + rd_kafka_resp_err_t act_err = rd_kafka_error_code( + rd_kafka_ConsumerGroupDescription_error(act)); + rd_kafka_consumer_group_state_t state = + rd_kafka_ConsumerGroupDescription_state(act); + TEST_ASSERT( + strcmp(exp->group_id, + rd_kafka_ConsumerGroupDescription_group_id(act)) == + 0, + "Result order mismatch at #%d: expected group id to be " + "%s, got %s", + i, exp->group_id, + rd_kafka_ConsumerGroupDescription_group_id(act)); + if (i < known_groups) { + int member_count; + const rd_kafka_MemberDescription_t *member; + const rd_kafka_MemberAssignment_t *assignment; + const char *client_id; + const char *group_instance_id; + const rd_kafka_topic_partition_list_t *partitions; + + TEST_ASSERT(state == + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE, + "Expected Stable state, got %s.", + rd_kafka_consumer_group_state_name(state)); + + TEST_ASSERT( + !rd_kafka_ConsumerGroupDescription_is_simple_consumer_group( + act), + "Expected a normal consumer group, got a simple " + "one."); + + member_count = + rd_kafka_ConsumerGroupDescription_member_count(act); + TEST_ASSERT(member_count == 1, + "Expected one member, got %d.", + member_count); + + member = + rd_kafka_ConsumerGroupDescription_member(act, 0); + + client_id = + rd_kafka_MemberDescription_client_id(member); + TEST_ASSERT(!strcmp(client_id, client_ids[i]), + "Expected client id \"%s\"," + " got \"%s\".", + client_ids[i], client_id); + + group_instance_id = + rd_kafka_MemberDescription_group_instance_id( + member); + TEST_ASSERT( + !strcmp(group_instance_id, group_instance_ids[i]), + "Expected group instance id \"%s\"," + " got \"%s\".", + group_instance_ids[i], group_instance_id); + + assignment = + rd_kafka_MemberDescription_assignment(member); + TEST_ASSERT(assignment != NULL, + "Expected non-NULL member assignment"); + + partitions = + rd_kafka_MemberAssignment_partitions(assignment); + TEST_ASSERT(partitions != NULL, + "Expected non-NULL member partitions"); + + TEST_SAY( + 
"Member client.id=\"%s\", " + "group.instance.id=\"%s\", " + "consumer_id=\"%s\", " + "host=\"%s\", assignment:\n", + rd_kafka_MemberDescription_client_id(member), + rd_kafka_MemberDescription_group_instance_id( + member), + rd_kafka_MemberDescription_consumer_id(member), + rd_kafka_MemberDescription_host(member)); + /* This is just to make sure the returned memory + * is valid. */ + test_print_partition_list(partitions); + } else { + TEST_ASSERT(state == RD_KAFKA_CONSUMER_GROUP_STATE_DEAD, + "Expected Dead state, got %s.", + rd_kafka_consumer_group_state_name(state)); + } + TEST_ASSERT(exp_err == act_err, + "expected err=%d for group %s, got %d (%s)", + exp_err, exp->group_id, act_err, + rd_kafka_err2str(act_err)); + } + + rd_kafka_event_destroy(rkev); + + for (i = 0; i < known_groups; i++) { + test_consumer_close(rks[i]); + rd_kafka_destroy(rks[i]); + } + + /* Wait session timeout + 1s. Because using static group membership */ + rd_sleep(6); + + test_DeleteGroups_simple(rk, NULL, (char **)describe_groups, + known_groups, NULL); + + for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) { + rd_free(expected[i].group_id); + } + + rd_free(topic); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT + + SUB_TEST_PASS(); +} /** * @brief Test deletion of committed offsets. @@ -2394,7 +2857,7 @@ static void do_test_DeleteConsumerGroupOffsets(const char *what, rd_kafka_t *rk, rd_kafka_queue_t *useq, - int op_timeout, + int req_timeout_ms, rd_bool_t sub_consumer) { rd_kafka_queue_t *q; rd_kafka_AdminOptions_t *options = NULL; @@ -2419,20 +2882,22 @@ rd_kafka_t *consumer; char *groupid; - SUB_TEST_QUICK("%s DeleteConsumerGroupOffsets with %s, op_timeout %d%s", - rd_kafka_name(rk), what, op_timeout, - sub_consumer ? ", with subscribing consumer" : ""); + SUB_TEST_QUICK( + "%s DeleteConsumerGroupOffsets with %s, req_timeout_ms %d%s", + rd_kafka_name(rk), what, req_timeout_ms, + sub_consumer ? ", with subscribing consumer" : ""); if (sub_consumer) exp_err = RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC; q = useq ? useq : rd_kafka_queue_new(rk); - if (op_timeout != -1) { - options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY); + if (req_timeout_ms != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS); - err = rd_kafka_AdminOptions_set_operation_timeout( - options, op_timeout, errstr, sizeof(errstr)); + err = rd_kafka_AdminOptions_set_request_timeout( + options, req_timeout_ms, errstr, sizeof(errstr)); TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); } @@ -2634,33 +3099,544 @@ rd_kafka_queue_destroy(q); TEST_LATER_CHECK(); -#undef MY_DEL_RECORDS_CNT +#undef MY_TOPIC_CNT SUB_TEST_PASS(); } -static void do_test_apis(rd_kafka_type_t cltype) { - rd_kafka_t *rk; - rd_kafka_conf_t *conf; - rd_kafka_queue_t *mainq; +/** + * @brief Test altering of committed offsets. 
+ * + * + */ +static void do_test_AlterConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int req_timeout_ms, + rd_bool_t sub_consumer, + rd_bool_t create_topics) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_alter, + *committed, *alterd, *subscription = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3 + int i; + const int partitions_cnt = 3; + char *topics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT]; + rd_kafka_metadata_topic_t + exp_mdtopics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}}; + int exp_mdtopic_cnt = 0; + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets; + const rd_kafka_AlterConsumerGroupOffsets_result_t *res; + const rd_kafka_group_result_t **gres; + size_t gres_cnt; + rd_kafka_t *consumer = NULL; + char *group_id; - /* Get the available brokers, but use a separate rd_kafka_t instance - * so we don't jinx the tests by having up-to-date metadata. */ - avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt); - TEST_SAY("%" PRIusz - " brokers in cluster " - "which will be used for replica sets\n", - avail_broker_cnt); + SUB_TEST_QUICK( + "%s AlterConsumerGroupOffsets with %s, " + "request_timeout %d%s", + rd_kafka_name(rk), what, req_timeout_ms, + sub_consumer ? ", with subscribing consumer" : ""); + + if (!create_topics) + exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART; + else if (sub_consumer) + exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID; + + if (sub_consumer && !create_topics) + TEST_FAIL( + "Can't use set sub_consumer and unset create_topics at the " + "same time"); - do_test_unclean_destroy(cltype, 0 /*tempq*/); - do_test_unclean_destroy(cltype, 1 /*mainq*/); + q = useq ? useq : rd_kafka_queue_new(rk); - test_conf_init(&conf, NULL, 180); - test_conf_set(conf, "socket.timeout.ms", "10000"); - rk = test_create_handle(cltype, conf); + if (req_timeout_ms != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS); - mainq = rd_kafka_queue_get_main(rk); + err = rd_kafka_AdminOptions_set_request_timeout( + options, req_timeout_ms, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + subscription = rd_kafka_topic_partition_list_new( + TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT); + + for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) { + char pfx[64]; + char *topic; + + rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i); + topic = rd_strdup(test_mk_topic_name(pfx, 1)); + + topics[i] = topic; + exp_mdtopics[exp_mdtopic_cnt++].topic = topic; + + rd_kafka_topic_partition_list_add(subscription, topic, + RD_KAFKA_PARTITION_UA); + } + + group_id = topics[0]; + + /* Create the topics first if needed. 
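As the code below goes on to do, an AlterConsumerGroupOffsets request is just a group id plus a topic partition list whose per-partition offset fields carry the offsets to commit on the group's behalf. A minimal construction sketch with illustrative topic/group names, reusing the rk/options/q handles set up above; the constructor appears to take its own copy of the list, since the test destroys the request and its own list independently:

    rd_kafka_topic_partition_list_t *parts =
        rd_kafka_topic_partition_list_new(2);
    rd_kafka_AlterConsumerGroupOffsets_t *req;

    /* ->offset carries the committed offset to set for that partition. */
    rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset = 5;
    rd_kafka_topic_partition_list_add(parts, "mytopic", 1)->offset = 5;

    req = rd_kafka_AlterConsumerGroupOffsets_new("mygroup", parts);
    rd_kafka_AlterConsumerGroupOffsets(rk, &req, 1, options, q);
    rd_kafka_AlterConsumerGroupOffsets_destroy(req);
    rd_kafka_topic_partition_list_destroy(parts);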
*/ + if (create_topics) { + test_CreateTopics_simple( + rk, NULL, topics, + TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT, partitions_cnt, + NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, + NULL, 0, 15 * 1000); + + rd_sleep(1); /* Additional wait time for cluster propagation */ + + consumer = test_create_consumer(group_id, NULL, NULL, NULL); + + if (sub_consumer) { + TEST_CALL_ERR__( + rd_kafka_subscribe(consumer, subscription)); + test_consumer_wait_assignment(consumer, rd_true); + } + } + + orig_offsets = rd_kafka_topic_partition_list_new( + TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt); + for (i = 0; + i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt; + i++) + rd_kafka_topic_partition_list_add(orig_offsets, + topics[i / partitions_cnt], + i % partitions_cnt) + ->offset = (i + 1) * 10; + + /* Commit some offsets, if topics exists */ + if (create_topics) { + TEST_CALL_ERR__( + rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/)); + + /* Verify committed offsets match */ + committed = rd_kafka_topic_partition_list_copy(orig_offsets); + TEST_CALL_ERR__(rd_kafka_committed(consumer, committed, + tmout_multip(5 * 1000))); + + if (test_partition_list_cmp(committed, orig_offsets)) { + TEST_SAY("commit() list:\n"); + test_print_partition_list(orig_offsets); + TEST_SAY("committed() list:\n"); + test_print_partition_list(committed); + TEST_FAIL("committed offsets don't match"); + } + rd_kafka_topic_partition_list_destroy(committed); + } + + /* Now alter second half of the commits */ + offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2); + to_alter = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2); + for (i = 0; i < orig_offsets->cnt; i++) { + if (i < orig_offsets->cnt / 2) + rd_kafka_topic_partition_list_add( + offsets, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition); + else { + rd_kafka_topic_partition_list_add( + to_alter, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition) + ->offset = 5; + rd_kafka_topic_partition_list_add( + offsets, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition) + ->offset = 5; + } + } + + cgoffsets = rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter); + + TIMING_START(&timing, "AlterConsumerGroupOffsets"); + TEST_SAY("Call AlterConsumerGroupOffsets\n"); + rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets); + + TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll"); + /* Poll result queue for AlterConsumerGroupOffsets result. + * Print but otherwise ignore other event types + * (typically generic Error events). 
*/ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000)); + TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT) + break; + + rd_kafka_event_destroy(rkev); + } + + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, + "expected AlterConsumerGroupOffsets to succeed, " + "got %s (%s)", + rd_kafka_err2name(err), err ? errstr2 : "n/a"); + + TEST_SAY("AlterConsumerGroupOffsets: returned %s (%s)\n", + rd_kafka_err2str(err), err ? errstr2 : "n/a"); + + gres = rd_kafka_AlterConsumerGroupOffsets_result_groups(res, &gres_cnt); + TEST_ASSERT(gres && gres_cnt == 1, + "expected gres_cnt == 1, not %" PRIusz, gres_cnt); + + alterd = rd_kafka_topic_partition_list_copy( + rd_kafka_group_result_partitions(gres[0])); + + if (test_partition_list_cmp(alterd, to_alter)) { + TEST_SAY("Result list:\n"); + test_print_partition_list(alterd); + TEST_SAY("Partitions passed to AlterConsumerGroupOffsets:\n"); + test_print_partition_list(to_alter); + TEST_FAIL("altered/requested offsets don't match"); + } + + /* Verify expected errors */ + for (i = 0; i < alterd->cnt; i++) { + TEST_ASSERT_LATER(alterd->elems[i].err == exp_err, + "Result %s [%" PRId32 + "] has error %s, " + "expected %s", + alterd->elems[i].topic, + alterd->elems[i].partition, + rd_kafka_err2name(alterd->elems[i].err), + rd_kafka_err2name(exp_err)); + } + + TEST_LATER_CHECK(); + + rd_kafka_topic_partition_list_destroy(alterd); + rd_kafka_topic_partition_list_destroy(to_alter); + + rd_kafka_event_destroy(rkev); + + + /* Verify committed offsets match, if topics exist. */ + if (create_topics) { + committed = rd_kafka_topic_partition_list_copy(orig_offsets); + TEST_CALL_ERR__(rd_kafka_committed(consumer, committed, + tmout_multip(5 * 1000))); + + TEST_SAY("Original committed offsets:\n"); + test_print_partition_list(orig_offsets); + + TEST_SAY("Committed offsets after alter:\n"); + test_print_partition_list(committed); + + if (test_partition_list_cmp(committed, offsets)) { + TEST_SAY("expected list:\n"); + test_print_partition_list(offsets); + TEST_SAY("committed() list:\n"); + test_print_partition_list(committed); + TEST_FAIL("committed offsets don't match"); + } + rd_kafka_topic_partition_list_destroy(committed); + } + + rd_kafka_topic_partition_list_destroy(offsets); + rd_kafka_topic_partition_list_destroy(orig_offsets); + rd_kafka_topic_partition_list_destroy(subscription); + + for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) + rd_free(topics[i]); + + if (create_topics) /* consumer is created only if topics are. */ + rd_kafka_destroy(consumer); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); +#undef TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT + + SUB_TEST_PASS(); +} + +/** + * @brief Test listing of committed offsets. 
+ * + * + */ +static void do_test_ListConsumerGroupOffsets(const char *what, + rd_kafka_t *rk, + rd_kafka_queue_t *useq, + int req_timeout_ms, + rd_bool_t sub_consumer, + rd_bool_t null_toppars) { + rd_kafka_queue_t *q; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_topic_partition_list_t *orig_offsets, *to_list, *committed, + *listd, *subscription = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t err; + char errstr[512]; + const char *errstr2; +#define TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3 + int i; + const int partitions_cnt = 3; + char *topics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT]; + rd_kafka_metadata_topic_t + exp_mdtopics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}}; + int exp_mdtopic_cnt = 0; + test_timing_t timing; + rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_kafka_ListConsumerGroupOffsets_t *cgoffsets; + const rd_kafka_ListConsumerGroupOffsets_result_t *res; + const rd_kafka_group_result_t **gres; + size_t gres_cnt; + rd_kafka_t *consumer; + char *group_id; + + SUB_TEST_QUICK( + "%s ListConsumerGroupOffsets with %s, " + "request timeout %d%s", + rd_kafka_name(rk), what, req_timeout_ms, + sub_consumer ? ", with subscribing consumer" : ""); + + q = useq ? useq : rd_kafka_queue_new(rk); + + if (req_timeout_ms != -1) { + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); + + err = rd_kafka_AdminOptions_set_request_timeout( + options, req_timeout_ms, errstr, sizeof(errstr)); + TEST_ASSERT(!err, "%s", rd_kafka_err2str(err)); + } + + + subscription = rd_kafka_topic_partition_list_new( + TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) { + char pfx[64]; + char *topic; + + rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i); + topic = rd_strdup(test_mk_topic_name(pfx, 1)); + + topics[i] = topic; + exp_mdtopics[exp_mdtopic_cnt++].topic = topic; + + rd_kafka_topic_partition_list_add(subscription, topic, + RD_KAFKA_PARTITION_UA); + } + + group_id = topics[0]; + + /* Create the topics first. 
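The listing flow set up in the remainder of this function reduces to roughly the sketch below; passing NULL instead of a partition list asks for all committed offsets of the group, which is the null_toppars variant exercised later. The group name and timeout are illustrative and rk is assumed to be an existing client handle:

    rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
    rd_kafka_ListConsumerGroupOffsets_t *req;
    rd_kafka_event_t *rkev;

    /* NULL partition list: list committed offsets for all partitions of
     * the group. A non-NULL list restricts the query. */
    req = rd_kafka_ListConsumerGroupOffsets_new("mygroup", NULL);

    rd_kafka_ListConsumerGroupOffsets(rk, &req, 1, NULL /*default options*/, q);
    rd_kafka_ListConsumerGroupOffsets_destroy(req);

    rkev = rd_kafka_queue_poll(q, 10000 /*ms*/);
    if (rkev && rd_kafka_event_type(rkev) ==
                    RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT) {
            const rd_kafka_ListConsumerGroupOffsets_result_t *res =
                rd_kafka_event_ListConsumerGroupOffsets_result(rkev);
            /* Offsets are then read through
             * rd_kafka_ListConsumerGroupOffsets_result_groups() and
             * rd_kafka_group_result_partitions(), as in the checks below. */
    }
    if (rkev)
            rd_kafka_event_destroy(rkev);
    rd_kafka_queue_destroy(q);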
*/ + test_CreateTopics_simple(rk, NULL, topics, + TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT, + partitions_cnt, NULL); + + /* Verify that topics are reported by metadata */ + test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0, + 15 * 1000); + + rd_sleep(1); /* Additional wait time for cluster propagation */ + + consumer = test_create_consumer(group_id, NULL, NULL, NULL); + + if (sub_consumer) { + TEST_CALL_ERR__(rd_kafka_subscribe(consumer, subscription)); + test_consumer_wait_assignment(consumer, rd_true); + } + + /* Commit some offsets */ + orig_offsets = rd_kafka_topic_partition_list_new( + TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2); + for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2; i++) + rd_kafka_topic_partition_list_add( + orig_offsets, topics[i / 2], + i % TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT) + ->offset = (i + 1) * 10; + + TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/)); + + /* Verify committed offsets match */ + committed = rd_kafka_topic_partition_list_copy(orig_offsets); + TEST_CALL_ERR__( + rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000))); + + if (test_partition_list_cmp(committed, orig_offsets)) { + TEST_SAY("commit() list:\n"); + test_print_partition_list(orig_offsets); + TEST_SAY("committed() list:\n"); + test_print_partition_list(committed); + TEST_FAIL("committed offsets don't match"); + } + + rd_kafka_topic_partition_list_destroy(committed); + + to_list = rd_kafka_topic_partition_list_new(orig_offsets->cnt); + for (i = 0; i < orig_offsets->cnt; i++) { + rd_kafka_topic_partition_list_add( + to_list, orig_offsets->elems[i].topic, + orig_offsets->elems[i].partition); + } + + if (null_toppars) { + cgoffsets = + rd_kafka_ListConsumerGroupOffsets_new(group_id, NULL); + } else { + cgoffsets = + rd_kafka_ListConsumerGroupOffsets_new(group_id, to_list); + } + + TIMING_START(&timing, "ListConsumerGroupOffsets"); + TEST_SAY("Call ListConsumerGroupOffsets\n"); + rd_kafka_ListConsumerGroupOffsets(rk, &cgoffsets, 1, options, q); + TIMING_ASSERT_LATER(&timing, 0, 50); + + rd_kafka_ListConsumerGroupOffsets_destroy(cgoffsets); + + TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll"); + /* Poll result queue for ListConsumerGroupOffsets result. + * Print but otherwise ignore other event types + * (typically generic Error events). */ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000)); + TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fms\n", + rd_kafka_event_name(rkev), + TIMING_DURATION(&timing) / 1000.0f); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT) + break; + + rd_kafka_event_destroy(rkev); + } + + /* Convert event to proper result */ + res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + + /* Expecting error */ + err = rd_kafka_event_error(rkev); + errstr2 = rd_kafka_event_error_string(rkev); + TEST_ASSERT(!err, + "expected ListConsumerGroupOffsets to succeed, " + "got %s (%s)", + rd_kafka_err2name(err), err ? errstr2 : "n/a"); + + TEST_SAY("ListConsumerGroupOffsets: returned %s (%s)\n", + rd_kafka_err2str(err), err ? 
errstr2 : "n/a"); + + gres = rd_kafka_ListConsumerGroupOffsets_result_groups(res, &gres_cnt); + TEST_ASSERT(gres && gres_cnt == 1, + "expected gres_cnt == 1, not %" PRIusz, gres_cnt); + + listd = rd_kafka_topic_partition_list_copy( + rd_kafka_group_result_partitions(gres[0])); + + if (test_partition_list_and_offsets_cmp(listd, orig_offsets)) { + TEST_SAY("Result list:\n"); + test_print_partition_list(listd); + TEST_SAY("Partitions passed to ListConsumerGroupOffsets:\n"); + test_print_partition_list(orig_offsets); + TEST_FAIL("listd/requested offsets don't match"); + } + + /* Verify expected errors */ + for (i = 0; i < listd->cnt; i++) { + TEST_ASSERT_LATER(listd->elems[i].err == exp_err, + "Result %s [%" PRId32 + "] has error %s, " + "expected %s", + listd->elems[i].topic, + listd->elems[i].partition, + rd_kafka_err2name(listd->elems[i].err), + rd_kafka_err2name(exp_err)); + } + + TEST_LATER_CHECK(); + + rd_kafka_topic_partition_list_destroy(listd); + rd_kafka_topic_partition_list_destroy(to_list); + + rd_kafka_event_destroy(rkev); + + rd_kafka_topic_partition_list_destroy(orig_offsets); + rd_kafka_topic_partition_list_destroy(subscription); + + for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) + rd_free(topics[i]); + + rd_kafka_destroy(consumer); + + if (options) + rd_kafka_AdminOptions_destroy(options); + + if (!useq) + rd_kafka_queue_destroy(q); + + TEST_LATER_CHECK(); + +#undef TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT + + SUB_TEST_PASS(); +} + +static void do_test_apis(rd_kafka_type_t cltype) { + rd_kafka_t *rk; + rd_kafka_conf_t *conf; + rd_kafka_queue_t *mainq; + + /* Get the available brokers, but use a separate rd_kafka_t instance + * so we don't jinx the tests by having up-to-date metadata. */ + avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt); + TEST_SAY("%" PRIusz + " brokers in cluster " + "which will be used for replica sets\n", + avail_broker_cnt); + + do_test_unclean_destroy(cltype, 0 /*tempq*/); + do_test_unclean_destroy(cltype, 1 /*mainq*/); + + test_conf_init(&conf, NULL, 180); + test_conf_set(conf, "socket.timeout.ms", "10000"); + rk = test_create_handle(cltype, conf); + + mainq = rd_kafka_queue_get_main(rk); /* Create topics */ do_test_CreateTopics("temp queue, op timeout 0", rk, NULL, 0, 0); @@ -2708,20 +3684,58 @@ do_test_DeleteRecords("temp queue, op timeout 0", rk, NULL, 0); do_test_DeleteRecords("main queue, op timeout 1500", rk, mainq, 1500); + /* List groups */ + do_test_ListConsumerGroups("temp queue", rk, NULL, -1, rd_false); + do_test_ListConsumerGroups("main queue", rk, mainq, 1500, rd_true); + + /* Describe groups */ + do_test_DescribeConsumerGroups("temp queue", rk, NULL, -1); + do_test_DescribeConsumerGroups("main queue", rk, mainq, 1500); + /* Delete groups */ - do_test_DeleteGroups("temp queue, op timeout 0", rk, NULL, 0); - do_test_DeleteGroups("main queue, op timeout 1500", rk, mainq, 1500); - do_test_DeleteGroups("main queue, op timeout 1500", rk, mainq, 1500); + do_test_DeleteGroups("temp queue", rk, NULL, -1); + do_test_DeleteGroups("main queue", rk, mainq, 1500); if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0)) { /* Delete committed offsets */ - do_test_DeleteConsumerGroupOffsets("temp queue, op timeout 0", - rk, NULL, 0, rd_false); + do_test_DeleteConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false); + do_test_DeleteConsumerGroupOffsets("main queue", rk, mainq, + 1500, rd_false); do_test_DeleteConsumerGroupOffsets( - "main queue, op timeout 1500", rk, mainq, 1500, rd_false); - 
do_test_DeleteConsumerGroupOffsets( - "main queue, op timeout 1500", rk, mainq, 1500, + "main queue", rk, mainq, 1500, rd_true /*with subscribing consumer*/); + + /* Alter committed offsets */ + do_test_AlterConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false, rd_true); + do_test_AlterConsumerGroupOffsets("main queue", rk, mainq, 1500, + rd_false, rd_true); + do_test_AlterConsumerGroupOffsets( + "main queue, nonexistent topics", rk, mainq, 1500, rd_false, + rd_false /* don't create topics */); + do_test_AlterConsumerGroupOffsets( + "main queue", rk, mainq, 1500, + rd_true, /*with subscribing consumer*/ + rd_true); + + /* List committed offsets */ + do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false, rd_false); + do_test_ListConsumerGroupOffsets( + "main queue, op timeout " + "1500", + rk, mainq, 1500, rd_false, rd_false); + do_test_ListConsumerGroupOffsets( + "main queue", rk, mainq, 1500, + rd_true /*with subscribing consumer*/, rd_false); + do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1, + rd_false, rd_true); + do_test_ListConsumerGroupOffsets("main queue", rk, mainq, 1500, + rd_false, rd_true); + do_test_ListConsumerGroupOffsets( + "main queue", rk, mainq, 1500, + rd_true /*with subscribing consumer*/, rd_true); } rd_kafka_queue_destroy(mainq); diff -Nru librdkafka-1.9.2/tests/0084-destroy_flags.c librdkafka-2.0.2/tests/0084-destroy_flags.c --- librdkafka-1.9.2/tests/0084-destroy_flags.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0084-destroy_flags.c 2023-01-20 09:14:36.000000000 +0000 @@ -182,8 +182,10 @@ /* Create the topic to avoid not-yet-auto-created-topics being * subscribed to (and thus raising an error). */ - if (!local_mode) + if (!local_mode) { test_create_topic(NULL, topic, 3, 1); + test_wait_topic_exists(NULL, topic, 5000); + } for (i = 0; i < (int)RD_ARRAYSIZE(args); i++) { for (j = 0; j < (int)RD_ARRAYSIZE(flag_combos); j++) { diff -Nru librdkafka-1.9.2/tests/0097-ssl_verify.cpp librdkafka-2.0.2/tests/0097-ssl_verify.cpp --- librdkafka-1.9.2/tests/0097-ssl_verify.cpp 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0097-ssl_verify.cpp 2023-01-20 09:14:36.000000000 +0000 @@ -37,21 +37,21 @@ static const std::string envname[RdKafka::CERT__CNT][RdKafka::CERT_ENC__CNT] = { /* [RdKafka::CERT_PUBLIC_KEY] = */ { - "RDK_SSL_pkcs", - "RDK_SSL_pub_der", - "RDK_SSL_pub_pem", + "SSL_pkcs", + "SSL_pub_der", + "SSL_pub_pem", }, /* [RdKafka::CERT_PRIVATE_KEY] = */ { - "RDK_SSL_pkcs", - "RDK_SSL_priv_der", - "RDK_SSL_priv_pem", + "SSL_pkcs", + "SSL_priv_der", + "SSL_priv_pem", }, /* [RdKafka::CERT_CA] = */ { - "RDK_SSL_pkcs", - "RDK_SSL_ca_der", - "RDK_SSL_ca_pem", + "SSL_pkcs", + "SSL_ca_der", + "SSL_all_cas_pem" /* Contains multiple CA certs */, }}; @@ -118,26 +118,45 @@ }; +/** + * @brief Set SSL PEM cert/key using configuration property. + * + * The cert/key is loadded from environment variables set up by trivup. + * + * @param loc_prop ssl.X.location property that will be cleared. + * @param pem_prop ssl.X.pem property that will be set. + * @param cert_type Certificate type. 
+ */ static void conf_location_to_pem(RdKafka::Conf *conf, std::string loc_prop, - std::string pem_prop) { + std::string pem_prop, + RdKafka::CertificateType cert_type) { std::string loc; - - if (conf->get(loc_prop, loc) != RdKafka::Conf::CONF_OK) - Test::Fail("Failed to get " + loc_prop); - std::string errstr; if (conf->set(loc_prop, "", errstr) != RdKafka::Conf::CONF_OK) Test::Fail("Failed to reset " + loc_prop + ": " + errstr); + const char *p; + p = test_getenv(envname[cert_type][RdKafka::CERT_ENC_PEM].c_str(), NULL); + if (!p) + Test::Fail( + "Invalid test environment: " + "Missing " + + envname[cert_type][RdKafka::CERT_ENC_PEM] + + " env variable: make sure trivup is up to date"); + + loc = p; + + /* Read file */ std::ifstream ifs(loc.c_str()); std::string pem((std::istreambuf_iterator(ifs)), std::istreambuf_iterator()); - Test::Say("Read " + loc_prop + "=" + loc + - " from disk and changed to in-memory " + pem_prop + "\n"); + Test::Say("Read env " + envname[cert_type][RdKafka::CERT_ENC_PEM] + "=" + + loc + " from disk and changed to in-memory " + pem_prop + + " string\n"); if (conf->set(pem_prop, pem, errstr) != RdKafka::Conf::CONF_OK) Test::Fail("Failed to set " + pem_prop + ": " + errstr); @@ -178,7 +197,8 @@ loc = p; Test::Say(tostr() << "Reading " << loc_prop << " file " << loc << " as " - << encnames[encoding] << "\n"); + << encnames[encoding] << " from env " + << envname[cert_type][encoding] << "\n"); /* Read file */ std::ifstream ifs(loc.c_str(), std::ios::binary | std::ios::ate); @@ -193,15 +213,15 @@ if (conf->set_ssl_cert(cert_type, encoding, buffer.data(), size, errstr) != RdKafka::Conf::CONF_OK) - Test::Fail(tostr() << "Failed to set cert from " << loc << " as cert type " - << cert_type << " with encoding " << encoding << ": " - << errstr << "\n"); + Test::Fail(tostr() << "Failed to set " << loc_prop << " from " << loc + << " as cert type " << cert_type << " with encoding " + << encoding << ": " << errstr << "\n"); } typedef enum { - USE_LOCATION, /* use ssl.key.location */ - USE_CONF, /* use ssl.key.pem */ + USE_LOCATION, /* use ssl.X.location */ + USE_CONF, /* use ssl.X.pem */ USE_SETTER, /* use conf->set_ssl_cert(), this supports multiple formats */ } cert_load_t; @@ -245,20 +265,22 @@ /* Get ssl.key.location, read its contents, and replace with * ssl.key.pem. Same with ssl.certificate.location -> ssl.certificate.pem. 
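The reworked helper above turns an ssl.X.location property into its in-memory equivalent. In the plain C API the corresponding two routes are the ssl.key.pem/ssl.certificate.pem/ssl.ca.pem configuration properties and rd_kafka_conf_set_ssl_cert(); a minimal sketch, assuming conf is an existing rd_kafka_conf_t * and the PEM data has already been read into pem_buf/pem_size:

    char errstr[512];

    /* Route 1: set the PEM contents as a configuration property string. */
    if (rd_kafka_conf_set(conf, "ssl.certificate.pem", pem_buf,
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
            fprintf(stderr, "ssl.certificate.pem: %s\n", errstr);

    /* Route 2: pass the raw buffer (PEM, DER or PKCS#12) directly. */
    if (rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PUBLIC_KEY,
                                   RD_KAFKA_CERT_ENC_PEM, pem_buf, pem_size,
                                   errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
            fprintf(stderr, "set_ssl_cert: %s\n", errstr);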
*/ if (load_key == USE_CONF) - conf_location_to_pem(conf, "ssl.key.location", "ssl.key.pem"); + conf_location_to_pem(conf, "ssl.key.location", "ssl.key.pem", + RdKafka::CERT_PRIVATE_KEY); else if (load_key == USE_SETTER) conf_location_to_setter(conf, "ssl.key.location", RdKafka::CERT_PRIVATE_KEY, key_enc); if (load_pub == USE_CONF) conf_location_to_pem(conf, "ssl.certificate.location", - "ssl.certificate.pem"); + "ssl.certificate.pem", RdKafka::CERT_PUBLIC_KEY); else if (load_pub == USE_SETTER) conf_location_to_setter(conf, "ssl.certificate.location", RdKafka::CERT_PUBLIC_KEY, pub_enc); if (load_ca == USE_CONF) - conf_location_to_pem(conf, "ssl.ca.location", "ssl.ca.pem"); + conf_location_to_pem(conf, "ssl.ca.location", "ssl.ca.pem", + RdKafka::CERT_CA); else if (load_ca == USE_SETTER) conf_location_to_setter(conf, "ssl.ca.location", RdKafka::CERT_CA, ca_enc); @@ -322,8 +344,7 @@ if (conf->set("security.protocol", "SSL", errstr)) Test::Fail(errstr); - if (conf->set("ssl.key.password", test_getenv("RDK_SSL_password", NULL), - errstr)) + if (conf->set("ssl.key.password", test_getenv("SSL_password", NULL), errstr)) Test::Fail(errstr); std::vector certBuffer = read_file(test_getenv( @@ -367,7 +388,7 @@ return 0; } - if (!test_getenv("RDK_SSL_pkcs", NULL)) { + if (!test_getenv("SSL_pkcs", NULL)) { Test::Skip("Test requires SSL_* env-vars set up by trivup\n"); return 0; } @@ -392,6 +413,12 @@ do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER, RdKafka::CERT_ENC_DER); + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER, + RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */ + do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM, + USE_SETTER, RdKafka::CERT_ENC_DER, USE_CONF, + RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */ do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PKCS12, USE_SETTER, RdKafka::CERT_ENC_PKCS12, USE_SETTER, RdKafka::CERT_ENC_PKCS12); diff -Nru librdkafka-1.9.2/tests/0101-fetch-from-follower.cpp librdkafka-2.0.2/tests/0101-fetch-from-follower.cpp --- librdkafka-1.9.2/tests/0101-fetch-from-follower.cpp 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0101-fetch-from-follower.cpp 2023-01-20 09:14:36.000000000 +0000 @@ -61,11 +61,11 @@ */ -static void test_assert(bool cond, std::string msg) { - if (!cond) - Test::Say(msg); - assert(cond); -} +#define test_assert(cond, msg) \ + do { \ + if (!(cond)) \ + Test::Say(msg); \ + } while (0) class TestEvent2Cb : public RdKafka::EventCb { @@ -235,6 +235,7 @@ test_assert(!err, cerrstr); rd_kafka_DescribeConfigs(p->c_ptr(), &config, 1, options, mainq); + rd_kafka_ConfigResource_destroy(config); rd_kafka_AdminOptions_destroy(options); rd_kafka_event_t *rkev = test_wait_admin_result( mainq, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 5000); @@ -275,6 +276,7 @@ rd_kafka_event_destroy(rkev); } + rd_kafka_queue_destroy(mainq); delete p; return (int)racks.size(); diff -Nru librdkafka-1.9.2/tests/0102-static_group_rebalance.c librdkafka-2.0.2/tests/0102-static_group_rebalance.c --- librdkafka-1.9.2/tests/0102-static_group_rebalance.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0102-static_group_rebalance.c 2023-01-20 09:14:36.000000000 +0000 @@ -493,6 +493,7 @@ rd_kafka_err2str(rkm->err), rd_kafka_message_errstr(rkm)); TEST_SAY("Fenced consumer returned expected: %s: %s\n", rd_kafka_err2name(rkm->err), rd_kafka_message_errstr(rkm)); + rd_kafka_message_destroy(rkm); 
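The rd_kafka_message_destroy() call added here plugs a leak in the test: every message returned by rd_kafka_consumer_poll(), including one that only carries an error, must be destroyed by the caller. For reference, a minimal consume loop (running is an application-defined flag and the timeout is illustrative):

    while (running) {
            rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 1000 /*ms*/);

            if (!rkm)
                    continue;       /* poll timed out */

            if (rkm->err)
                    fprintf(stderr, "Consume error: %s\n",
                            rd_kafka_message_errstr(rkm));
            else {
                    /* process rkm->payload / rkm->len ... */
            }

            rd_kafka_message_destroy(rkm); /* always destroy, error or not */
    }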
/* Read the actual error */ diff -Nru librdkafka-1.9.2/tests/0103-transactions.c librdkafka-2.0.2/tests/0103-transactions.c --- librdkafka-1.9.2/tests/0103-transactions.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0103-transactions.c 2023-01-20 09:14:36.000000000 +0000 @@ -635,14 +635,17 @@ error = rd_kafka_begin_transaction(p); TEST_ASSERT(error, "Expected begin_transactions() to fail"); TEST_ASSERT(rd_kafka_error_code(error) == - RD_KAFKA_RESP_ERR__STATE, + RD_KAFKA_RESP_ERR__CONFLICT, "Expected begin_transactions() to fail " - "with STATE, not %s", + "with CONFLICT, not %s", rd_kafka_error_name(error)); rd_kafka_error_destroy(error); } + TEST_ASSERT(i <= 5000, + "init_transactions() did not succeed after %d calls\n", i); + TEST_SAY("init_transactions() succeeded after %d call(s)\n", i + 1); /* Make sure a sub-sequent init call fails. */ diff -Nru librdkafka-1.9.2/tests/0105-transactions_mock.c librdkafka-2.0.2/tests/0105-transactions_mock.c --- librdkafka-1.9.2/tests/0105-transactions_mock.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0105-transactions_mock.c 2023-01-20 09:14:36.000000000 +0000 @@ -129,10 +129,11 @@ * which must be assigned prior to * calling create_tnx_producer(). */ -static rd_kafka_t *create_txn_producer(rd_kafka_mock_cluster_t **mclusterp, - const char *transactional_id, - int broker_cnt, - ...) { +static RD_SENTINEL rd_kafka_t * +create_txn_producer(rd_kafka_mock_cluster_t **mclusterp, + const char *transactional_id, + int broker_cnt, + ...) { rd_kafka_conf_t *conf; rd_kafka_t *rk; char numstr[8]; @@ -145,6 +146,12 @@ test_conf_init(&conf, NULL, 60); test_conf_set(conf, "transactional.id", transactional_id); + /* When mock brokers are set to down state they're still binding + * the port, just not listening to it, which makes connection attempts + * stall until socket.connection.setup.timeout.ms expires. + * To speed up detection of brokers being down we reduce this timeout + * to just a couple of seconds. */ + test_conf_set(conf, "socket.connection.setup.timeout.ms", "5000"); /* Speed up reconnects */ test_conf_set(conf, "reconnect.backoff.max.ms", "2000"); test_conf_set(conf, "test.mock.num.brokers", numstr); @@ -587,6 +594,7 @@ rd_bool_t exp_retriable; rd_bool_t exp_abortable; rd_bool_t exp_fatal; + rd_bool_t exp_successful_abort; } scenario[] = { /* This list of errors is from the EndTxnResponse handler in * AK clients/.../TransactionManager.java */ @@ -623,22 +631,24 @@ RD_KAFKA_RESP_ERR_NO_ERROR, }, { - /* #4 */ + /* #4: the abort is auto-recovering thru epoch bump */ 1, {RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID}, RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID, rd_false /* !retriable */, rd_true /* abortable */, - rd_false /* !fatal */ + rd_false /* !fatal */, + rd_true /* successful abort */ }, { - /* #5 */ + /* #5: the abort is auto-recovering thru epoch bump */ 1, {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING}, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING, rd_false /* !retriable */, rd_true /* abortable */, - rd_false /* !fatal */ + rd_false /* !fatal */, + rd_true /* successful abort */ }, { /* #6 */ @@ -715,6 +725,8 @@ for (j = 0; j < (2 + 2); j++) { rd_bool_t commit = j < 2; rd_bool_t with_flush = j & 1; + rd_bool_t exp_successful_abort = + !commit && scenario[i].exp_successful_abort; const char *commit_str = commit ? (with_flush ? "commit&flush" : "commit") : (with_flush ? 
"abort&flush" : "abort"); @@ -726,7 +738,9 @@ TEST_SAY("Testing scenario #%d %s with %" PRIusz " injected erorrs, expecting %s\n", i, commit_str, scenario[i].error_cnt, - rd_kafka_err2name(scenario[i].exp_err)); + exp_successful_abort + ? "successful abort" + : rd_kafka_err2name(scenario[i].exp_err)); if (!rk) { const char *txnid = "myTxnId"; @@ -811,7 +825,7 @@ TEST_SAY("Scenario #%d %s succeeded\n", i, commit_str); - if (!scenario[i].exp_err) { + if (!scenario[i].exp_err || exp_successful_abort) { TEST_ASSERT(!error, "Expected #%d %s to succeed, " "got %s", @@ -983,7 +997,7 @@ rd_kafka_error_t *error; test_timing_t t_call; - /* Messages will fail on as the transaction fails, + /* Messages will fail as the transaction fails, * ignore the DR error */ test_curr->ignore_dr_err = rd_true; @@ -994,7 +1008,7 @@ RD_KAFKA_V_END)); /* - * Commit/abort transaction, first with som retriable failures + * Commit/abort transaction, first with some retriable failures * whos retries exceed the user timeout. */ rd_kafka_mock_push_request_errors( @@ -1019,43 +1033,119 @@ error = rd_kafka_abort_transaction(rk, 100); TIMING_STOP(&t_call); - TEST_SAY("%s returned %s\n", commit_str, - error ? rd_kafka_error_string(error) : "success"); - + TEST_SAY_ERROR(error, "%s returned: ", commit_str); TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str); - TEST_ASSERT( rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, "Expected %s to fail with timeout, not %s: %s", commit_str, rd_kafka_error_name(error), rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_is_retriable(error), + "%s failure should raise a retriable error", + commit_str); + rd_kafka_error_destroy(error); - if (!commit) - TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error), - "abort_transaction() failure should raise " - "a txn_requires_abort error"); - else { - TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), - "commit_transaction() failure should raise " - "a txn_requires_abort error"); - TEST_SAY( - "Aborting transaction as instructed by " - "error flag\n"); + /* Now call it again with an infinite timeout, should work. */ + TIMING_START(&t_call, "%s_transaction() nr 2", commit_str); + if (commit) + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + else TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); - } + TIMING_STOP(&t_call); + } + + /* All done */ + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + + +/** + * @brief Test commit/abort inflight timeout behaviour, which should result + * in a retriable error. + */ +static void do_test_txn_endtxn_timeout_inflight(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster = NULL; + const char *txnid = "myTxnId"; + int32_t coord_id = 1; + int i; + + SUB_TEST(); + + allowed_error = RD_KAFKA_RESP_ERR__TIMED_OUT; + test_curr->is_fatal_cb = error_is_fatal_cb; + + rk = create_txn_producer(&mcluster, txnid, 1, "transaction.timeout.ms", + "5000", NULL); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + + for (i = 0; i < 2; i++) { + rd_bool_t commit = i == 0; + const char *commit_str = commit ? 
"commit" : "abort"; + rd_kafka_error_t *error; + test_timing_t t_call; + + /* Messages will fail as the transaction fails, + * ignore the DR error */ + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2), + RD_KAFKA_V_END)); + + /* Let EndTxn & EndTxn retry timeout */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_EndTxn, 2, + RD_KAFKA_RESP_ERR_NO_ERROR, 10000, + RD_KAFKA_RESP_ERR_NO_ERROR, 10000); + + rd_sleep(1); + + TIMING_START(&t_call, "%s_transaction()", commit_str); + if (commit) + error = rd_kafka_commit_transaction(rk, 4000); + else + error = rd_kafka_abort_transaction(rk, 4000); + TIMING_STOP(&t_call); + TEST_SAY_ERROR(error, "%s returned: ", commit_str); + TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str); + TEST_ASSERT( + rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Expected %s to fail with timeout, not %s: %s", commit_str, + rd_kafka_error_name(error), rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_is_retriable(error), + "%s failure should raise a retriable error", + commit_str); rd_kafka_error_destroy(error); - TIMING_ASSERT(&t_call, 99, 199); + /* Now call it again with an infinite timeout, should work. */ + TIMING_START(&t_call, "%s_transaction() nr 2", commit_str); + if (commit) + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + else + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); + TIMING_STOP(&t_call); } /* All done */ rd_kafka_destroy(rk); + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + test_curr->is_fatal_cb = NULL; + SUB_TEST_PASS(); } + /** * @brief Test that EndTxn is properly sent for aborted transactions * even if AddOffsetsToTxnRequest was retried. @@ -1602,6 +1692,60 @@ /** + * @brief Verify that send_offsets_to_transaction() with no eligible offsets + * is handled properly - the call should succeed immediately and be + * repeatable. + */ +static void do_test_txns_send_offsets_non_eligible(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST_QUICK(); + + rk = create_txn_producer(&mcluster, "txnid", 3, NULL); + + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + /* Wait for messages to be delivered */ + test_flush(rk, 5000); + + /* Empty offsets list */ + offsets = rd_kafka_topic_partition_list_new(0); + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + /* Now call it again, should also succeed. */ + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000)); + + /* All done */ + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** * @brief Verify that request timeouts don't cause crash (#2913). 
*/ static void do_test_txns_no_timeout_crash(void) { @@ -2480,8 +2624,7 @@ error = rd_kafka_commit_transaction(rk, -1); TEST_ASSERT(error != NULL, "expected commit_transaciton() to fail"); - TEST_SAY("commit_transaction() failed (as expected): %s\n", - rd_kafka_error_string(error)); + TEST_SAY_ERROR(error, "commit_transaction() failed (as expected): "); TEST_ASSERT(rd_kafka_error_txn_requires_abort(error), "Expected txn_requires_abort error"); rd_kafka_error_destroy(error); @@ -2496,7 +2639,6 @@ TEST_ASSERT(remains == 0, "%d message(s) were not flushed\n", remains); TEST_SAY("Attempting second transaction, which should succeed\n"); - allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; test_curr->is_fatal_cb = error_is_fatal_cb; test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; @@ -2509,6 +2651,7 @@ rd_kafka_destroy(rk); + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; test_curr->is_fatal_cb = NULL; SUB_TEST_PASS(); @@ -2835,7 +2978,8 @@ state.mcluster = mcluster; state.grpid = grpid; state.broker_id = switch_coord ? 3 : 2; - thrd_create(&thrd, delayed_up_cb, &state); + if (thrd_create(&thrd, delayed_up_cb, &state) != thrd_success) + TEST_FAIL("Failed to create thread"); TEST_SAY("Calling send_offsets_to_transaction()\n"); offsets = rd_kafka_topic_partition_list_new(1); @@ -2866,83 +3010,897 @@ } +/** + * @brief Test that a NULL coordinator is not fatal when + * the transactional producer reconnects to the txn coordinator + * and the first thing it does is a FindCoordinatorRequest that + * fails with COORDINATOR_NOT_AVAILABLE, setting coordinator to NULL. + */ +static void do_test_txn_coordinator_null_not_fatal(void) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + rd_kafka_resp_err_t err; + int32_t coord_id = 1; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int msgcnt = 1; + int remains = 0; -int main_0105_transactions_mock(int argc, char **argv) { - if (test_needs_auth()) { - TEST_SKIP("Mock cluster does not support SSL/SASL\n"); - return 0; - } + SUB_TEST_QUICK(); - do_test_txn_recoverable_errors(); + /* Broker down is not a test-failing error */ + allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT; + test_curr->is_fatal_cb = error_is_fatal_cb; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT; - do_test_txn_fatal_idempo_errors(); + /* One second is the minimum transaction timeout */ + rk = create_txn_producer(&mcluster, transactional_id, 1, + "transaction.timeout.ms", "1000", NULL); - do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH); - do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_PRODUCER_FENCED); + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); - do_test_txn_req_cnt(); + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + coord_id); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id); - do_test_txn_requires_abort_errors(); + /* Start transactioning */ + TEST_SAY("Starting transaction\n"); + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); - do_test_txn_slow_reinit(rd_false); - do_test_txn_slow_reinit(rd_true); + /* Makes the produce request timeout. 
*/ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 3000); - /* Just do a subset of tests in quick mode */ - if (test_quick) - return 0; + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0, &remains); - do_test_txn_endtxn_errors(); + /* This value is linked to transaction.timeout.ms, needs enough time + * so the message times out and a DrainBump sequence is started. */ + rd_kafka_flush(rk, 1000); + + /* To trigger the error the COORDINATOR_NOT_AVAILABLE response + * must come AFTER idempotent state has changed to WaitTransport + * but BEFORE it changes to WaitPID. To make it more likely + * rd_kafka_txn_coord_timer_start timeout can be changed to 5 ms + * in rd_kafka_txn_coord_query, when unable to query for + * transaction coordinator. + */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_FindCoordinator, 1, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, 10); - do_test_txn_endtxn_infinite(); + /* Coordinator down starts the FindCoordinatorRequest loop. */ + TEST_SAY("Bringing down coordinator %" PRId32 "\n", coord_id); + rd_kafka_mock_broker_set_down(mcluster, coord_id); - /* Skip tests for non-infinite commit/abort timeouts - * until they're properly handled by the producer. */ - if (0) - do_test_txn_endtxn_timeout(); + /* Coordinator down for some time. */ + rd_usleep(100 * 1000, NULL); - /* Bring down the coordinator */ - do_test_txn_broker_down_in_txn(rd_true); + /* When it comes up, the error is triggered, if the preconditions + * happen. */ + TEST_SAY("Bringing up coordinator %" PRId32 "\n", coord_id); + rd_kafka_mock_broker_set_up(mcluster, coord_id); - /* Bring down partition leader */ - do_test_txn_broker_down_in_txn(rd_false); + /* Make sure DRs are received */ + rd_kafka_flush(rk, 1000); - do_test_txns_not_supported(); + error = rd_kafka_commit_transaction(rk, -1); - do_test_txns_send_offsets_concurrent_is_retried(); + TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains); + TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail"); + TEST_SAY("commit_transaction() failed (expectedly): %s\n", + rd_kafka_error_string(error)); + rd_kafka_error_destroy(error); - do_test_txn_coord_req_destroy(); + /* Needs to wait some time before closing to make sure it doesn't go + * into TERMINATING state before error is triggered. */ + rd_usleep(1000 * 1000, NULL); + rd_kafka_destroy(rk); - do_test_txn_coord_req_multi_find(); + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR; + test_curr->is_fatal_cb = NULL; - do_test_txn_addparts_req_multi(); + SUB_TEST_PASS(); +} - do_test_txns_no_timeout_crash(); - do_test_txn_auth_failure( - RD_KAFKAP_InitProducerId, - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); - do_test_txn_auth_failure( - RD_KAFKAP_FindCoordinator, - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); +/** + * @brief Simple test to make sure the init_transactions() timeout is honoured + * and also not infinite. 
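do_test_txn_resumable_init() below checks that a short init_transactions() timeout is honoured and that the call can be resumed. At the application level the pattern is simply to call the API again while it keeps failing with a retriable timeout error, for example:

    rd_kafka_error_t *error;

    while ((error = rd_kafka_init_transactions(rk, 1000 /*ms*/))) {
            if (!rd_kafka_error_is_retriable(error)) {
                    fprintf(stderr, "init_transactions() failed: %s\n",
                            rd_kafka_error_string(error));
                    rd_kafka_error_destroy(error);
                    break;          /* non-retriable or fatal: give up */
            }
            /* Timed out: the operation continues in the background,
             * just call init_transactions() again. */
            rd_kafka_error_destroy(error);
    }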
+ */ +static void do_test_txn_resumable_init(void) { + rd_kafka_t *rk; + const char *transactional_id = "txnid"; + rd_kafka_error_t *error; + test_timing_t duration; - do_test_txn_flush_timeout(); + SUB_TEST(); - do_test_unstable_offset_commit(); + rd_kafka_conf_t *conf; - do_test_commit_after_msg_timeout(); + test_conf_init(&conf, NULL, 20); + test_conf_set(conf, "bootstrap.servers", ""); + test_conf_set(conf, "transactional.id", transactional_id); + test_conf_set(conf, "transaction.timeout.ms", "4000"); - do_test_txn_switch_coordinator(); + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); - do_test_txn_switch_coordinator_refresh(); + /* First make sure a lower timeout is honoured. */ + TIMING_START(&duration, "init_transactions(1000)"); + error = rd_kafka_init_transactions(rk, 1000); + TIMING_STOP(&duration); - do_test_out_of_order_seq(); + if (error) + TEST_SAY("First init_transactions failed (as expected): %s\n", + rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Expected _TIMED_OUT, not %s", + error ? rd_kafka_error_string(error) : "success"); + rd_kafka_error_destroy(error); - do_test_topic_disappears_for_awhile(); + TIMING_ASSERT(&duration, 900, 1500); - do_test_disconnected_group_coord(rd_false); + TEST_SAY( + "Performing second init_transactions() call now with an " + "infinite timeout: " + "should time out in 2 x transaction.timeout.ms\n"); + + TIMING_START(&duration, "init_transactions(infinite)"); + error = rd_kafka_init_transactions(rk, -1); + TIMING_STOP(&duration); - do_test_disconnected_group_coord(rd_true); + if (error) + TEST_SAY("Second init_transactions failed (as expected): %s\n", + rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT, + "Expected _TIMED_OUT, not %s", + error ? rd_kafka_error_string(error) : "success"); + rd_kafka_error_destroy(error); + + TIMING_ASSERT(&duration, 2 * 4000 - 500, 2 * 4000 + 500); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Retries a transaction call until it succeeds or returns a + * non-retriable error - which will cause the test to fail. + * + * @param intermed_calls Is a block of code that will be called after each + * retriable failure of \p call. + */ +#define RETRY_TXN_CALL__(call, intermed_calls) \ + do { \ + rd_kafka_error_t *_error = call; \ + if (!_error) \ + break; \ + TEST_SAY_ERROR(_error, "%s: ", "" #call); \ + TEST_ASSERT(rd_kafka_error_is_retriable(_error), \ + "Expected retriable error"); \ + TEST_SAY("%s failed, retrying in 1 second\n", "" #call); \ + rd_kafka_error_destroy(_error); \ + intermed_calls; \ + rd_sleep(1); \ + } while (1) + +/** + * @brief Call \p call and expect it to fail with \p exp_err_code. + */ +#define TXN_CALL_EXPECT_ERROR__(call, exp_err_code) \ + do { \ + rd_kafka_error_t *_error = call; \ + TEST_ASSERT(_error != NULL, \ + "%s: Expected %s error, got success", "" #call, \ + rd_kafka_err2name(exp_err_code)); \ + TEST_SAY_ERROR(_error, "%s: ", "" #call); \ + TEST_ASSERT(rd_kafka_error_code(_error) == exp_err_code, \ + "%s: Expected %s error, got %s", "" #call, \ + rd_kafka_err2name(exp_err_code), \ + rd_kafka_error_name(_error)); \ + rd_kafka_error_destroy(_error); \ + } while (0) + + +/** + * @brief Simple test to make sure short API timeouts can be safely resumed + * by calling the same API again. + * + * @param do_commit Commit transaction if true, else abort transaction. 
+ */ +static void do_test_txn_resumable_calls_timeout(rd_bool_t do_commit) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + int32_t coord_id = 1; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int msgcnt = 1; + int remains = 0; + + SUB_TEST("%s_transaction", do_commit ? "commit" : "abort"); + + rk = create_txn_producer(&mcluster, transactional_id, 1, NULL); + + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + coord_id); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id); + + TEST_SAY("Starting transaction\n"); + TEST_SAY("Delaying first two InitProducerIdRequests by 500ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_InitProducerId, 2, + RD_KAFKA_RESP_ERR_NO_ERROR, 500, RD_KAFKA_RESP_ERR_NO_ERROR, 500); + + RETRY_TXN_CALL__( + rd_kafka_init_transactions(rk, 100), + TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1), + RD_KAFKA_RESP_ERR__CONFLICT)); + + RETRY_TXN_CALL__(rd_kafka_begin_transaction(rk), /*none*/); + + + TEST_SAY("Delaying ProduceRequests by 3000ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 3000); + + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0, &remains); + + + TEST_SAY("Delaying SendOffsetsToTransaction by 400ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_AddOffsetsToTxn, 1, + RD_KAFKA_RESP_ERR_NO_ERROR, 400); + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic", 0)->offset = 12; + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + /* This is not a resumable call on timeout */ + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + + TEST_SAY("Delaying EndTxnRequests by 1200ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_EndTxn, 1, RD_KAFKA_RESP_ERR_NO_ERROR, + 1200); + + /* Committing/aborting the transaction will also be delayed by the + * previous accumulated remaining delays. */ + + if (do_commit) { + TEST_SAY("Committing transaction\n"); + + RETRY_TXN_CALL__( + rd_kafka_commit_transaction(rk, 100), + TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1), + RD_KAFKA_RESP_ERR__CONFLICT)); + } else { + TEST_SAY("Aborting transaction\n"); + + RETRY_TXN_CALL__( + rd_kafka_abort_transaction(rk, 100), + TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, -1), + RD_KAFKA_RESP_ERR__CONFLICT)); + } + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Verify that resuming timed out calls that after the timeout, but + * before the resuming call, would error out. + */ +static void do_test_txn_resumable_calls_timeout_error(rd_bool_t do_commit) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + int32_t coord_id = 1; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int msgcnt = 1; + int remains = 0; + rd_kafka_error_t *error; + + SUB_TEST_QUICK("%s_transaction", do_commit ? 
"commit" : "abort"); + + rk = create_txn_producer(&mcluster, transactional_id, 1, NULL); + + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id, + coord_id); + rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id); + + TEST_SAY("Starting transaction\n"); + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, + msgcnt, NULL, 0, &remains); + + + TEST_SAY("Fail EndTxn fatally after 2000ms\n"); + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, coord_id, RD_KAFKAP_EndTxn, 1, + RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, 2000); + + if (do_commit) { + TEST_SAY("Committing transaction\n"); + + TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500), + RD_KAFKA_RESP_ERR__TIMED_OUT); + + /* Sleep so that the background EndTxn fails locally and sets + * an error result. */ + rd_sleep(3); + + error = rd_kafka_commit_transaction(rk, -1); + + } else { + TEST_SAY("Aborting transaction\n"); + + TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500), + RD_KAFKA_RESP_ERR__TIMED_OUT); + + /* Sleep so that the background EndTxn fails locally and sets + * an error result. */ + rd_sleep(3); + + error = rd_kafka_commit_transaction(rk, -1); + } + + TEST_ASSERT(error != NULL && rd_kafka_error_is_fatal(error), + "Expected fatal error, not %s", + rd_kafka_error_string(error)); + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, + "Expected error INVALID_TXN_STATE, got %s", + rd_kafka_error_name(error)); + rd_kafka_error_destroy(error); + + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +/** + * @brief Concurrent transaction API calls are not permitted. + * This test makes sure they're properly enforced. + * + * For each transactional API, call it with a 5s timeout, and during that time + * from another thread call transactional APIs, one by one, and verify that + * we get an ERR__CONFLICT error back in the second thread. + * + * We use a mutex for synchronization, the main thread will hold the lock + * when not calling an API but release it just prior to calling. + * The other thread will acquire the lock, sleep, and hold the lock while + * calling the concurrent API that should fail immediately, releasing the lock + * when done. + * + */ + +struct _txn_concurrent_state { + const char *api; + mtx_t lock; + rd_kafka_t *rk; + struct test *test; +}; + +static int txn_concurrent_thread_main(void *arg) { + struct _txn_concurrent_state *state = arg; + static const char *apis[] = { + "init_transactions", "begin_transaction", + "send_offsets_to_transaction", "commit_transaction", + "abort_transaction", NULL}; + rd_kafka_t *rk = state->rk; + const char *main_api = NULL; + int i; + + /* Update TLS variable so TEST_..() macros work */ + test_curr = state->test; + + while (1) { + const char *api = NULL; + const int timeout_ms = 10000; + rd_kafka_error_t *error = NULL; + rd_kafka_resp_err_t exp_err; + test_timing_t duration; + + /* Wait for other thread's txn call to start, then sleep a bit + * to increase the chance of that call has really begun. 
*/ + mtx_lock(&state->lock); + + if (state->api && state->api == main_api) { + /* Main thread is still blocking on the last API call */ + TEST_SAY("Waiting for main thread to finish %s()\n", + main_api); + mtx_unlock(&state->lock); + rd_sleep(1); + continue; + } else if (!(main_api = state->api)) { + mtx_unlock(&state->lock); + break; + } + + rd_sleep(1); + + for (i = 0; (api = apis[i]) != NULL; i++) { + TEST_SAY( + "Triggering concurrent %s() call while " + "main is in %s() call\n", + api, main_api); + TIMING_START(&duration, "%s", api); + + if (!strcmp(api, "init_transactions")) + error = + rd_kafka_init_transactions(rk, timeout_ms); + else if (!strcmp(api, "begin_transaction")) + error = rd_kafka_begin_transaction(rk); + else if (!strcmp(api, "send_offsets_to_transaction")) { + rd_kafka_topic_partition_list_t *offsets = + rd_kafka_topic_partition_list_new(1); + rd_kafka_consumer_group_metadata_t *cgmetadata = + rd_kafka_consumer_group_metadata_new( + "mygroupid"); + rd_kafka_topic_partition_list_add(offsets, + "srctopic", 0) + ->offset = 12; + + error = rd_kafka_send_offsets_to_transaction( + rk, offsets, cgmetadata, -1); + rd_kafka_consumer_group_metadata_destroy( + cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + } else if (!strcmp(api, "commit_transaction")) + error = + rd_kafka_commit_transaction(rk, timeout_ms); + else if (!strcmp(api, "abort_transaction")) + error = + rd_kafka_abort_transaction(rk, timeout_ms); + else + TEST_FAIL("Unknown API: %s", api); + + TIMING_STOP(&duration); + + TEST_SAY_ERROR(error, "Conflicting %s() call: ", api); + TEST_ASSERT(error, + "Expected conflicting %s() call to fail", + api); + + exp_err = !strcmp(api, main_api) + ? RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS + : RD_KAFKA_RESP_ERR__CONFLICT; + + TEST_ASSERT(rd_kafka_error_code(error) == exp_err, + + "Conflicting %s(): Expected %s, not %s", + api, rd_kafka_err2str(exp_err), + rd_kafka_error_name(error)); + TEST_ASSERT( + rd_kafka_error_is_retriable(error), + "Conflicting %s(): Expected retriable error", api); + rd_kafka_error_destroy(error); + /* These calls should fail immediately */ + TIMING_ASSERT(&duration, 0, 100); + } + + mtx_unlock(&state->lock); + } + + return 0; +} + +static void do_test_txn_concurrent_operations(rd_bool_t do_commit) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + int32_t coord_id = 1; + rd_kafka_resp_err_t err; + const char *topic = "test"; + const char *transactional_id = "txnid"; + int remains = 0; + thrd_t thrd; + struct _txn_concurrent_state state = RD_ZERO_INIT; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + + SUB_TEST("%s", do_commit ? "commit" : "abort"); + + test_timeout_set(90); + + rk = create_txn_producer(&mcluster, transactional_id, 1, NULL); + + /* Set broker RTT to 5s so that the background thread has ample + * time to call its conflicting APIs. 
*/ + rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 5000); + + err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err)); + + /* Set up shared state between us and the concurrent thread */ + mtx_init(&state.lock, mtx_plain); + state.test = test_curr; + state.rk = rk; + + /* We release the lock only while calling the TXN API */ + mtx_lock(&state.lock); + + /* Spin up concurrent thread */ + if (thrd_create(&thrd, txn_concurrent_thread_main, (void *)&state) != + thrd_success) + TEST_FAIL("Failed to create thread"); + +#define _start_call(callname) \ + do { \ + state.api = callname; \ + mtx_unlock(&state.lock); \ + } while (0) +#define _end_call() mtx_lock(&state.lock) + + _start_call("init_transactions"); + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + _end_call(); + + /* This call doesn't block, so can't really be tested concurrently. */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10, + NULL, 0, &remains); + + _start_call("send_offsets_to_transaction"); + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic", 0)->offset = 12; + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + TEST_CALL_ERROR__( + rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1)); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + _end_call(); + + if (do_commit) { + _start_call("commit_transaction"); + TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1)); + _end_call(); + } else { + _start_call("abort_transaction"); + TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1)); + _end_call(); + } + + /* Signal completion to background thread */ + state.api = NULL; + + mtx_unlock(&state.lock); + + thrd_join(thrd, NULL); + + rd_kafka_destroy(rk); + + mtx_destroy(&state.lock); + + SUB_TEST_PASS(); +} + + +/** + * @brief KIP-360: Test that fatal idempotence errors trigger abortable + * transaction errors, while letting the broker-side abort of the + * transaction fail with a fencing error. + * Should raise a fatal error. + * + * @param error_code Which error code EndTxn should fail with. + * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older) + * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
+ */ +static void do_test_txn_fenced_abort(rd_kafka_resp_err_t error_code) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_error_t *error; + int32_t txn_coord = 2; + const char *txnid = "myTxnId"; + char errstr[512]; + rd_kafka_resp_err_t fatal_err; + size_t errors_cnt; + + SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code)); + + rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1", + NULL); + + rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, + txn_coord); + + test_curr->ignore_dr_err = rd_true; + test_curr->is_fatal_cb = error_is_fatal_cb; + allowed_error = RD_KAFKA_RESP_ERR__FENCED; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1)); + + /* + * Start a transaction + */ + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + + /* Produce a message without error first */ + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + + test_flush(rk, -1); + + /* Fail abort transaction */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, txn_coord, RD_KAFKAP_EndTxn, 1, error_code, 0); + + /* Fail the PID reinit */ + rd_kafka_mock_broker_push_request_error_rtts( + mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0); + + /* Produce a message, let it fail with a fatal idempo error. */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_Produce, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID); + + TEST_CALL_ERR__(rd_kafka_producev( + rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END)); + + test_flush(rk, -1); + + /* Abort the transaction, should fail with a fatal error */ + error = rd_kafka_abort_transaction(rk, -1); + TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail"); + + TEST_SAY_ERROR(error, "abort_transaction() failed: "); + TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error"); + rd_kafka_error_destroy(error); + + fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr)); + TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised"); + TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr); + + /* Verify that the producer sent the expected number of EndTxn requests + * by inspecting the mock broker error stack, + * which should now be empty. 
*/ + if (rd_kafka_mock_broker_error_stack_cnt( + mcluster, txn_coord, RD_KAFKAP_EndTxn, &errors_cnt)) { + TEST_FAIL( + "Broker error count should succeed for API %s" + " on broker %" PRId32, + rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), txn_coord); + } + /* Checks all the RD_KAFKAP_EndTxn responses have been consumed */ + TEST_ASSERT(errors_cnt == 0, + "Expected error count 0 for API %s, found %zu", + rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), errors_cnt); + + if (rd_kafka_mock_broker_error_stack_cnt( + mcluster, txn_coord, RD_KAFKAP_InitProducerId, &errors_cnt)) { + TEST_FAIL( + "Broker error count should succeed for API %s" + " on broker %" PRId32, + rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), txn_coord); + } + /* Checks none of the RD_KAFKAP_InitProducerId responses have been + * consumed + */ + TEST_ASSERT(errors_cnt == 1, + "Expected error count 1 for API %s, found %zu", + rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), errors_cnt); + + /* All done */ + rd_kafka_destroy(rk); + + allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR; + + SUB_TEST_PASS(); +} + + +/** + * @brief Test that the TxnOffsetCommit op doesn't retry without waiting + * if the coordinator is found but not available, causing too frequent retries. + */ +static void +do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_bool_t times_out) { + rd_kafka_t *rk; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_resp_err_t err; + rd_kafka_topic_partition_list_t *offsets; + rd_kafka_consumer_group_metadata_t *cgmetadata; + rd_kafka_error_t *error; + int timeout; + + SUB_TEST_QUICK("times_out=%s", RD_STR_ToF(times_out)); + + rk = create_txn_producer(&mcluster, "txnid", 3, NULL); + + test_curr->ignore_dr_err = rd_true; + + TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000)); + + TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk)); + + err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"), + RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END); + TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err)); + + /* Wait for messages to be delivered */ + test_flush(rk, 5000); + + /* + * Fail TxnOffsetCommit with COORDINATOR_NOT_AVAILABLE + * repeatedly. + */ + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_TxnOffsetCommit, 4, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE); + + offsets = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(offsets, "srctopic", 3)->offset = 1; + + cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid"); + + /* The retry delay is 500ms, with 4 retries it should take at least + * 2000ms for this call to succeed. */ + timeout = times_out ? 
500 : 4000; + error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, + timeout); + rd_kafka_consumer_group_metadata_destroy(cgmetadata); + rd_kafka_topic_partition_list_destroy(offsets); + + if (times_out) { + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + "expected %s, got: %s", + rd_kafka_err2name( + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE), + rd_kafka_err2str(rd_kafka_error_code(error))); + } else { + TEST_ASSERT(rd_kafka_error_code(error) == + RD_KAFKA_RESP_ERR_NO_ERROR, + "expected \"Success\", found: %s", + rd_kafka_err2str(rd_kafka_error_code(error))); + } + rd_kafka_error_destroy(error); + + /* All done */ + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + + +int main_0105_transactions_mock(int argc, char **argv) { + if (test_needs_auth()) { + TEST_SKIP("Mock cluster does not support SSL/SASL\n"); + return 0; + } + + do_test_txn_recoverable_errors(); + + do_test_txn_fatal_idempo_errors(); + + do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH); + do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_PRODUCER_FENCED); + + do_test_txn_req_cnt(); + + do_test_txn_requires_abort_errors(); + + do_test_txn_slow_reinit(rd_false); + do_test_txn_slow_reinit(rd_true); + + /* Just do a subset of tests in quick mode */ + if (test_quick) + return 0; + + do_test_txn_endtxn_errors(); + + do_test_txn_endtxn_infinite(); + + do_test_txn_endtxn_timeout(); + + do_test_txn_endtxn_timeout_inflight(); + + /* Bring down the coordinator */ + do_test_txn_broker_down_in_txn(rd_true); + + /* Bring down partition leader */ + do_test_txn_broker_down_in_txn(rd_false); + + do_test_txns_not_supported(); + + do_test_txns_send_offsets_concurrent_is_retried(); + + do_test_txns_send_offsets_non_eligible(); + + do_test_txn_coord_req_destroy(); + + do_test_txn_coord_req_multi_find(); + + do_test_txn_addparts_req_multi(); + + do_test_txns_no_timeout_crash(); + + do_test_txn_auth_failure( + RD_KAFKAP_InitProducerId, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); + + do_test_txn_auth_failure( + RD_KAFKAP_FindCoordinator, + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED); + + do_test_txn_flush_timeout(); + + do_test_unstable_offset_commit(); + + do_test_commit_after_msg_timeout(); + + do_test_txn_switch_coordinator(); + + do_test_txn_switch_coordinator_refresh(); + + do_test_out_of_order_seq(); + + do_test_topic_disappears_for_awhile(); + + do_test_disconnected_group_coord(rd_false); + + do_test_disconnected_group_coord(rd_true); + + do_test_txn_coordinator_null_not_fatal(); + + do_test_txn_resumable_calls_timeout(rd_true); + + do_test_txn_resumable_calls_timeout(rd_false); + + do_test_txn_resumable_calls_timeout_error(rd_true); + + do_test_txn_resumable_calls_timeout_error(rd_false); + do_test_txn_resumable_init(); + + do_test_txn_concurrent_operations(rd_true /*commit*/); + + do_test_txn_concurrent_operations(rd_false /*abort*/); + + do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH); + + do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_PRODUCER_FENCED); + + do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_true); + + do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_false); return 0; } diff -Nru librdkafka-1.9.2/tests/0110-batch_size.cpp librdkafka-2.0.2/tests/0110-batch_size.cpp --- librdkafka-1.9.2/tests/0110-batch_size.cpp 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0110-batch_size.cpp 2023-01-20 09:14:36.000000000 +0000 @@ -133,6 +133,8 @@ if (!p) Test::Fail("Failed to create Producer: " + errstr); + delete 
conf; + /* Produce messages */ char val[msgsize]; memset(val, 'a', msgsize); diff -Nru librdkafka-1.9.2/tests/0117-mock_errors.c librdkafka-2.0.2/tests/0117-mock_errors.c --- librdkafka-1.9.2/tests/0117-mock_errors.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/0117-mock_errors.c 2023-01-20 09:14:36.000000000 +0000 @@ -255,6 +255,54 @@ SUB_TEST_PASS(); } +/** + * @brief Verify that a cluster roll does not cause consumer_poll() to return + * the temporary and retriable COORDINATOR_LOAD_IN_PROGRESS error. We should + * backoff and retry in that case. + */ +static void do_test_joingroup_coordinator_load_in_progress() { + rd_kafka_conf_t *conf; + rd_kafka_t *consumer; + rd_kafka_mock_cluster_t *mcluster; + const char *bootstraps; + const char *topic = "test"; + const int msgcnt = 1; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 60); + + mcluster = test_mock_cluster_new(1, &bootstraps); + + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10, + "bootstrap.servers", bootstraps, + "batch.num.messages", "1", NULL); + + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "auto.offset.reset", "earliest"); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_FindCoordinator, 1, + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS); + + consumer = test_create_consumer("mygroup", NULL, conf, NULL); + + test_consumer_subscribe(consumer, topic); + + /* Wait for assignment and one message */ + test_consumer_poll("consumer", consumer, 0, -1, -1, msgcnt, NULL); + + test_consumer_close(consumer); + + rd_kafka_destroy(consumer); + + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + int main_0117_mock_errors(int argc, char **argv) { if (test_needs_auth()) { @@ -270,5 +318,7 @@ do_test_offset_commit_request_timed_out(rd_true); do_test_offset_commit_request_timed_out(rd_false); + do_test_joingroup_coordinator_load_in_progress(); + return 0; } diff -Nru librdkafka-1.9.2/tests/0133-ssl_keys.c librdkafka-2.0.2/tests/0133-ssl_keys.c --- librdkafka-1.9.2/tests/0133-ssl_keys.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/0133-ssl_keys.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,113 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" +#include "rdstring.h" + +/** + * @brief Tests reading SSL PKCS#12 keystore or PEM certificate and key from + * file. Decoding it with the correct password or not. + * + * Ensures it's read correctly on Windows too. + * See https://github.com/edenhill/librdkafka/issues/3992 + */ +static void do_test_ssl_keys(const char *type, rd_bool_t correct_password) { +#define TEST_FIXTURES_FOLDER "./fixtures" +#define TEST_FIXTURES_SSL_FOLDER TEST_FIXTURES_FOLDER "/ssl/" +#define TEST_FIXTURES_KEYSTORE_PASSWORD "use_strong_password_keystore_client" +#define TEST_FIXTURES_KEY_PASSWORD "use_strong_password_keystore_client2" +#define TEST_KEYSTORE_LOCATION TEST_FIXTURES_SSL_FOLDER "client.keystore.p12" +#define TEST_CERTIFICATE_LOCATION \ + TEST_FIXTURES_SSL_FOLDER "client2.certificate.pem" +#define TEST_KEY_LOCATION TEST_FIXTURES_SSL_FOLDER "client2.key" + + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + char errstr[256]; + + SUB_TEST_QUICK("keystore type = %s, correct password = %s", type, + RD_STR_ToF(correct_password)); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "security.protocol", "SSL"); + + if (!strcmp(type, "PKCS12")) { + test_conf_set(conf, "ssl.keystore.location", + TEST_KEYSTORE_LOCATION); + if (correct_password) + test_conf_set(conf, "ssl.keystore.password", + TEST_FIXTURES_KEYSTORE_PASSWORD); + else + test_conf_set(conf, "ssl.keystore.password", + TEST_FIXTURES_KEYSTORE_PASSWORD + " and more"); + } else if (!strcmp(type, "PEM")) { + test_conf_set(conf, "ssl.certificate.location", + TEST_CERTIFICATE_LOCATION); + test_conf_set(conf, "ssl.key.location", TEST_KEY_LOCATION); + if (correct_password) + test_conf_set(conf, "ssl.key.password", + TEST_FIXTURES_KEY_PASSWORD); + else + test_conf_set(conf, "ssl.keystore.password", + TEST_FIXTURES_KEYSTORE_PASSWORD + " and more"); + } else { + TEST_FAIL("Unexpected key type\n"); + } + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if ((rk != NULL) != correct_password) { + TEST_FAIL("Expected rd_kafka creation to %s\n", + correct_password ? 
"succeed" : "fail"); + } + + if (rk) + rd_kafka_destroy(rk); + else + rd_kafka_conf_destroy(conf); + + SUB_TEST_PASS(); + +#undef TEST_FIXTURES_KEYSTORE_PASSWORD +#undef TEST_FIXTURES_KEY_PASSWORD +#undef TEST_KEYSTORE_LOCATION +#undef TEST_CERTIFICATE_LOCATION +#undef TEST_KEY_LOCATION +#undef TEST_FIXTURES_FOLDER +#undef TEST_FIXTURES_SSL_FOLDER +} + + +int main_0133_ssl_keys(int argc, char **argv) { + do_test_ssl_keys("PKCS12", rd_true); + do_test_ssl_keys("PKCS12", rd_false); + do_test_ssl_keys("PEM", rd_true); + do_test_ssl_keys("PEM", rd_false); + return 0; +} diff -Nru librdkafka-1.9.2/tests/0134-ssl_provider.c librdkafka-2.0.2/tests/0134-ssl_provider.c --- librdkafka-1.9.2/tests/0134-ssl_provider.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/0134-ssl_provider.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,92 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + + +static void test_providers(const char *providers, + rd_bool_t must_pass, + rd_bool_t must_fail) { + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + char errstr[512]; + + SUB_TEST_QUICK("providers=%s, %s pass, %s fail", providers, + must_pass ? "must" : "may", must_fail ? "must" : "may"); + + test_conf_init(&conf, NULL, 10); + + /* Enable debugging so we get some extra information on + * OpenSSL version and provider versions in the test log. */ + test_conf_set(conf, "debug", "security"); + test_conf_set(conf, "ssl.providers", providers); + test_conf_set(conf, "security.protocol", "ssl"); + + rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + + TEST_SAY("rd_kafka_new(ssl.providers=%s): %s\n", providers, + rk ? 
"success" : errstr); + + if (must_pass && !rk) + TEST_FAIL("Expected ssl.providers=%s to work, got %s", + providers, errstr); + else if (must_fail && rk) + TEST_FAIL("Expected ssl.providers=%s to fail", providers); + + if (!rk) + rd_kafka_conf_destroy(conf); + else + rd_kafka_destroy(rk); + + SUB_TEST_PASS(); +} + +int main_0134_ssl_provider(int argc, char **argv) { + rd_kafka_conf_t *conf; + char errstr[512]; + rd_kafka_conf_res_t res; + + test_conf_init(&conf, NULL, 10); + + /* Check that we're linked/built with OpenSSL 3.x */ + res = rd_kafka_conf_set(conf, "ssl.providers", "a,b", errstr, + sizeof(errstr)); + rd_kafka_conf_destroy(conf); + if (res == RD_KAFKA_CONF_INVALID) { + TEST_SKIP("%s\n", errstr); + return 0; + } + + /* Must pass since 'default' is always built in */ + test_providers("default", rd_true, rd_false); + /* May fail, if legacy provider is not available. */ + test_providers("default,legacy", rd_false, rd_false); + /* Must fail since non-existent provider */ + test_providers("default,thisProviderDoesNotExist", rd_false, rd_true); + return 0; +} diff -Nru librdkafka-1.9.2/tests/0135-sasl_credentials.cpp librdkafka-2.0.2/tests/0135-sasl_credentials.cpp --- librdkafka-1.9.2/tests/0135-sasl_credentials.cpp 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/0135-sasl_credentials.cpp 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,143 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/** + * Verify that SASL credentials can be updated. + */ +#include "testcpp.h" + + + +class authErrorEventCb : public RdKafka::EventCb { + public: + authErrorEventCb() : error_seen(false) { + } + + void event_cb(RdKafka::Event &event) { + switch (event.type()) { + case RdKafka::Event::EVENT_ERROR: + Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": " + << event.str() << "\n"); + if (event.err() == RdKafka::ERR__AUTHENTICATION) + error_seen = true; + break; + + case RdKafka::Event::EVENT_LOG: + Test::Say(tostr() << "Log: " << event.str() << "\n"); + break; + + default: + break; + } + } + + bool error_seen; +}; + + +/** + * @brief Test setting SASL credentials. + * + * 1. 
Switch out the proper username/password for invalid ones. + * 2. Verify that we get an auth failure. + * 3. Set the proper username/passwords. + * 4. Verify that we can now connect. + */ +static void do_test(bool set_after_auth_failure) { + RdKafka::Conf *conf; + Test::conf_init(&conf, NULL, 30); + + SUB_TEST_QUICK("set_after_auth_failure=%s", + set_after_auth_failure ? "yes" : "no"); + + /* Get the correct sasl.username and sasl.password */ + std::string username, password; + if (conf->get("sasl.username", username) || + conf->get("sasl.password", password)) { + delete conf; + SUB_TEST_SKIP("sasl.username and/or sasl.password not configured\n"); + return; + } + + /* Replace with incorrect ones */ + Test::conf_set(conf, "sasl.username", "ThisIsNotRight"); + Test::conf_set(conf, "sasl.password", "Neither Is This"); + + /* Set up an event callback to track authentication errors */ + authErrorEventCb pEvent = authErrorEventCb(); + std::string errstr; + if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK) + Test::Fail(errstr); + + /* Create client */ + RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr); + if (!p) + Test::Fail("Failed to create Producer: " + errstr); + delete conf; + + if (set_after_auth_failure) { + Test::Say("Awaiting auth failure\n"); + + while (!pEvent.error_seen) + p->poll(1000); + + Test::Say("Authentication error seen\n"); + } + + Test::Say("Setting proper credentials\n"); + RdKafka::Error *error = p->sasl_set_credentials(username, password); + if (error) + Test::Fail("Failed to set credentials: " + error->str()); + + Test::Say("Expecting successful cluster authentication\n"); + const std::string clusterid = p->clusterid(5 * 1000); + + if (clusterid.empty()) + Test::Fail("Expected clusterid() to succeed"); + + delete p; + + SUB_TEST_PASS(); +} + +extern "C" { +int main_0135_sasl_credentials(int argc, char **argv) { + const char *mech = test_conf_get(NULL, "sasl.mechanism"); + + if (strcmp(mech, "PLAIN") && strncmp(mech, "SCRAM", 5)) { + Test::Skip("Test requires SASL PLAIN or SASL SCRAM\n"); + return 0; + } + + do_test(false); + do_test(true); + + return 0; +} +} diff -Nru librdkafka-1.9.2/tests/0136-resolve_cb.c librdkafka-2.0.2/tests/0136-resolve_cb.c --- librdkafka-1.9.2/tests/0136-resolve_cb.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/0136-resolve_cb.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,181 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "rdkafka.h" + +#ifndef _WIN32 +#include +#else +#define WIN32_MEAN_AND_LEAN +#include +#include +#include +#endif + +/** + * @name Test a custom address resolution callback. + * + * The test sets bogus bootstrap.servers, uses the resolution callback to + * resolve to a bogus address, and then verifies that the address is passed + * to the connect callback. If the resolution callback is not invoked, or if the + * connect callback is not invoked with the output of the resolution callback, + * the test will fail. + */ + +/** + * Stage of the test: + * 0: expecting resolve_cb to be invoked with TESTING_RESOLVE_CB:1234 + * 1: expecting resolve_cb to be invoked with NULL, NULL + * 2: expecting connect_cb to invoked with socket address 127.1.2.3:57616 + * 3: done + */ +static rd_atomic32_t stage; + +/** Exposes current test struct (in TLS) to callbacks. */ +static struct test *this_test; + +static int resolve_cb(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque) { + + int32_t cnt; + + test_curr = this_test; + + cnt = rd_atomic32_get(&stage); + + TEST_SAY("resolve_cb invoked: node=%s service=%s stage=%d\n", node, + service, cnt); + + if (cnt == 0) { + /* Stage 0: return a bogus address. */ + + struct sockaddr_in *addr; + + TEST_ASSERT(node != NULL); + TEST_ASSERT(strcmp(node, "TESTING_RESOLVE_CB") == 0, + "unexpected node: %s", node); + TEST_ASSERT(service != NULL); + TEST_ASSERT(strcmp(service, "1234") == 0, + "unexpected service: %s", service); + + addr = calloc(1, sizeof(struct sockaddr_in)); + addr->sin_family = AF_INET; + addr->sin_port = htons(4321); + addr->sin_addr.s_addr = htonl(0x7f010203) /* 127.1.2.3 */; + + *res = calloc(1, sizeof(struct addrinfo)); + (*res)->ai_family = AF_INET; + (*res)->ai_socktype = SOCK_STREAM; + (*res)->ai_protocol = IPPROTO_TCP; + (*res)->ai_addrlen = sizeof(struct sockaddr_in); + (*res)->ai_addr = (struct sockaddr *)addr; + } else if (cnt == 1) { + /* Stage 1: free the bogus address returned in stage 0. */ + + TEST_ASSERT(node == NULL); + TEST_ASSERT(service == NULL); + TEST_ASSERT(hints == NULL); + free((*res)->ai_addr); + free(*res); + } else { + /* Stage 2+: irrelevant, simply fail to resolve. */ + + return -1; + } + + rd_atomic32_add(&stage, 1); + return 0; +} + +static int connect_cb(int s, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque) { + /* Stage 3: assert address is expected bogus. 
*/ + + int32_t cnt; + struct sockaddr_in *addr_in; + + test_curr = this_test; + + cnt = rd_atomic32_get(&stage); + + TEST_SAY("connect_cb invoked: stage=%d\n", cnt); + + TEST_ASSERT(cnt == 2, "connect_cb invoked in unexpected stage: %d", + cnt); + + TEST_ASSERT(addr->sa_family == AF_INET, + "address has unexpected type: %d", addr->sa_family); + + addr_in = (struct sockaddr_in *)(void *)addr; + + TEST_ASSERT(addr_in->sin_port == htons(4321), + "address has unexpected port: %d", + ntohs(addr_in->sin_port)); + TEST_ASSERT(addr_in->sin_addr.s_addr == htonl(0x7f010203), + "address has unexpected host: 0x%x", + ntohl(addr_in->sin_addr.s_addr)); + + rd_atomic32_add(&stage, 1); + + /* The test has succeeded. Just report the connection as failed + * for simplicity. */ + return -1; +} + +int main_0136_resolve_cb(int argc, char **argv) { + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + + this_test = test_curr; + + rd_atomic32_init(&stage, 0); + + test_conf_init(&conf, NULL, 0); + rd_kafka_conf_set_resolve_cb(conf, resolve_cb); + rd_kafka_conf_set_connect_cb(conf, connect_cb); + + TEST_SAY("Setting bogus broker list\n"); + test_conf_set(conf, "bootstrap.servers", "TESTING_RESOLVE_CB:1234"); + + rk = test_create_handle(RD_KAFKA_PRODUCER, conf); + + while (rd_atomic32_get(&stage) != 3) + rd_sleep(1); + + rd_kafka_destroy(rk); + + return 0; +} diff -Nru librdkafka-1.9.2/tests/0137-barrier_batch_consume.c librdkafka-2.0.2/tests/0137-barrier_batch_consume.c --- librdkafka-1.9.2/tests/0137-barrier_batch_consume.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/0137-barrier_batch_consume.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,290 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2022, Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" +/* Typical include path would be , but this program + * is built from within the librdkafka source tree and thus differs.
*/ +#include "rdkafka.h" /* for Kafka driver */ + +typedef struct consumer_s { + const char *what; + rd_kafka_queue_t *rkq; + int timeout_ms; + int consume_msg_cnt; + int expected_msg_cnt; + rd_kafka_t *rk; + uint64_t testid; + test_msgver_t *mv; + struct test *test; +} consumer_t; + +static int consumer_batch_queue(void *arg) { + consumer_t *arguments = arg; + int msg_cnt = 0; + int i; + test_timing_t t_cons; + + rd_kafka_queue_t *rkq = arguments->rkq; + int timeout_ms = arguments->timeout_ms; + const int consume_msg_cnt = arguments->consume_msg_cnt; + rd_kafka_t *rk = arguments->rk; + uint64_t testid = arguments->testid; + rd_kafka_message_t **rkmessage = + malloc(consume_msg_cnt * sizeof(*rkmessage)); + + if (arguments->test) + test_curr = arguments->test; + + TEST_SAY( + "%s calling consume_batch_queue(timeout=%d, msgs=%d) " + "and expecting %d messages back\n", + rd_kafka_name(rk), timeout_ms, consume_msg_cnt, + arguments->expected_msg_cnt); + + TIMING_START(&t_cons, "CONSUME"); + msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage, + consume_msg_cnt); + TIMING_STOP(&t_cons); + + TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk), + msg_cnt, arguments->consume_msg_cnt, + arguments->expected_msg_cnt); + TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt, + "consumed %d messages, expected %d", msg_cnt, + arguments->expected_msg_cnt); + + for (i = 0; i < msg_cnt; i++) { + if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0) + TEST_FAIL( + "The message is not from testid " + "%" PRId64, + testid); + rd_kafka_message_destroy(rkmessage[i]); + } + + return 0; +} + + +static void do_test_consume_batch_with_seek(void) { + rd_kafka_queue_t *rkq; + const char *topic; + rd_kafka_t *consumer; + int p; + uint64_t testid; + rd_kafka_conf_t *conf; + consumer_t consumer_args = RD_ZERO_INIT; + test_msgver_t mv; + thrd_t thread_id; + rd_kafka_error_t *err; + rd_kafka_topic_partition_list_t *seek_toppars; + const int produce_partition_cnt = 2; + const int timeout_ms = 10000; + const int consume_msg_cnt = 10; + const int produce_msg_cnt = 8; + const int32_t seek_partition = 0; + const int64_t seek_offset = 1; + const int expected_msg_cnt = produce_msg_cnt - seek_offset; + + SUB_TEST(); + + test_conf_init(&conf, NULL, 60); + test_conf_set(conf, "enable.auto.commit", "false"); + test_conf_set(conf, "auto.offset.reset", "earliest"); + + testid = test_id_generate(); + test_msgver_init(&mv, testid); + + /* Produce messages */ + topic = test_mk_topic_name("0137-barrier_batch_consume", 1); + + for (p = 0; p < produce_partition_cnt; p++) + test_produce_msgs_easy(topic, testid, p, + produce_msg_cnt / produce_partition_cnt); + + /* Create consumers */ + consumer = + test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); + + test_consumer_subscribe(consumer, topic); + test_consumer_wait_assignment(consumer, rd_false); + + /* Create generic consume queue */ + rkq = rd_kafka_queue_get_consumer(consumer); + + consumer_args.what = "CONSUMER"; + consumer_args.rkq = rkq; + consumer_args.timeout_ms = timeout_ms; + consumer_args.consume_msg_cnt = consume_msg_cnt; + consumer_args.expected_msg_cnt = expected_msg_cnt; + consumer_args.rk = consumer; + consumer_args.testid = testid; + consumer_args.mv = &mv; + consumer_args.test = test_curr; + if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) != + thrd_success) + TEST_FAIL("Failed to create thread for %s", "CONSUMER"); + + seek_toppars = rd_kafka_topic_partition_list_new(1); + 
rd_kafka_topic_partition_list_add(seek_toppars, topic, seek_partition); + rd_kafka_topic_partition_list_set_offset(seek_toppars, topic, + seek_partition, seek_offset); + err = rd_kafka_seek_partitions(consumer, seek_toppars, 2000); + + TEST_ASSERT(!err, + "Failed to seek partition %d for topic %s to offset %ld", + seek_partition, topic, seek_offset); + + thrd_join(thread_id, NULL); + + test_msgver_verify("CONSUME", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP, + 0, expected_msg_cnt); + test_msgver_clear(&mv); + + rd_kafka_topic_partition_list_destroy(seek_toppars); + + rd_kafka_queue_destroy(rkq); + + test_consumer_close(consumer); + + rd_kafka_destroy(consumer); + + SUB_TEST_PASS(); +} + + +// static void do_test_consume_batch_with_pause_and_resume(void) { +// rd_kafka_queue_t *rkq; +// const char *topic; +// rd_kafka_t *consumer; +// int p; +// uint64_t testid; +// rd_kafka_conf_t *conf; +// consumer_t consumer_args = RD_ZERO_INIT; +// test_msgver_t mv; +// thrd_t thread_id; +// rd_kafka_resp_err_t err; +// rd_kafka_topic_partition_list_t *pause_partition_list; +// rd_kafka_message_t **rkmessages; +// size_t msg_cnt; +// const int timeout_ms = 10000; +// const int consume_msg_cnt = 10; +// const int produce_msg_cnt = 8; +// const int produce_partition_cnt = 2; +// const int expected_msg_cnt = 4; +// int32_t pause_partition = 0; + +// SUB_TEST(); + +// test_conf_init(&conf, NULL, 60); +// test_conf_set(conf, "enable.auto.commit", "false"); +// test_conf_set(conf, "auto.offset.reset", "earliest"); + +// testid = test_id_generate(); +// test_msgver_init(&mv, testid); + +// /* Produce messages */ +// topic = test_mk_topic_name("0137-barrier_batch_consume", 1); + +// for (p = 0; p < produce_partition_cnt; p++) +// test_produce_msgs_easy(topic, testid, p, +// produce_msg_cnt / +// produce_partition_cnt); + +// /* Create consumers */ +// consumer = +// test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL); + +// test_consumer_subscribe(consumer, topic); +// test_consumer_wait_assignment(consumer, rd_false); + +// /* Create generic consume queue */ +// rkq = rd_kafka_queue_get_consumer(consumer); + +// consumer_args.what = "CONSUMER"; +// consumer_args.rkq = rkq; +// consumer_args.timeout_ms = timeout_ms; +// consumer_args.consume_msg_cnt = consume_msg_cnt; +// consumer_args.expected_msg_cnt = expected_msg_cnt; +// consumer_args.rk = consumer; +// consumer_args.testid = testid; +// consumer_args.mv = &mv; +// consumer_args.test = test_curr; +// if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) != +// thrd_success) +// TEST_FAIL("Failed to create thread for %s", "CONSUMER"); + +// pause_partition_list = rd_kafka_topic_partition_list_new(1); +// rd_kafka_topic_partition_list_add(pause_partition_list, topic, +// pause_partition); + +// rd_sleep(1); +// err = rd_kafka_pause_partitions(consumer, pause_partition_list); + +// TEST_ASSERT(!err, "Failed to pause partition %d for topic %s", +// pause_partition, topic); + +// rd_sleep(1); + +// err = rd_kafka_resume_partitions(consumer, pause_partition_list); + +// TEST_ASSERT(!err, "Failed to resume partition %d for topic %s", +// pause_partition, topic); + +// thrd_join(thread_id, NULL); + +// rkmessages = malloc(consume_msg_cnt * sizeof(*rkmessages)); + +// msg_cnt = rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessages, +// consume_msg_cnt); + +// TEST_ASSERT(msg_cnt == expected_msg_cnt, +// "consumed %zu messages, expected %d", msg_cnt, +// expected_msg_cnt); + +// test_msgver_verify("CONSUME", &mv, TEST_MSGVER_ORDER | +// 
TEST_MSGVER_DUP, +// 0, produce_msg_cnt); +// test_msgver_clear(&mv); + +// rd_kafka_queue_destroy(rkq); + +// test_consumer_close(consumer); + +// rd_kafka_destroy(consumer); + +// SUB_TEST_PASS(); +// } + + +int main_0137_barrier_batch_consume(int argc, char **argv) { + do_test_consume_batch_with_seek(); + // FIXME: Run this test once consume batch is fully fixed. + // do_test_consume_batch_with_pause_and_resume(); + return 0; +} diff -Nru librdkafka-1.9.2/tests/0138-admin_mock.c librdkafka-2.0.2/tests/0138-admin_mock.c --- librdkafka-1.9.2/tests/0138-admin_mock.c 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/0138-admin_mock.c 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,189 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "test.h" + +#include "../src/rdkafka_proto.h" + +#include + +/** + * @brief Verify that error codes returned by the OffsetCommit call of + * AlterConsumerGroupOffsets are propagated as the corresponding error code + * in the passed partitions.
+ */ +static void do_test_AlterConsumerGroupOffsets_errors(int req_timeout_ms) { +#define TEST_ERR_SIZE 10 + int i, j; + rd_kafka_conf_t *conf; + rd_kafka_t *rk; + rd_kafka_queue_t *q; + rd_kafka_mock_cluster_t *mcluster; + rd_kafka_topic_partition_list_t *to_alter; + const rd_kafka_topic_partition_list_t *partitions; + rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets; + const rd_kafka_AlterConsumerGroupOffsets_result_t *res; + const rd_kafka_group_result_t **gres; + size_t gres_cnt; + char errstr[512]; + const char *bootstraps; + const char *topic = "test"; + const char *group_id = topic; + rd_kafka_AdminOptions_t *options = NULL; + rd_kafka_event_t *rkev = NULL; + rd_kafka_resp_err_t errs[TEST_ERR_SIZE] = { + RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS, + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS, + RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, + RD_KAFKA_RESP_ERR_NOT_COORDINATOR, + RD_KAFKA_RESP_ERR_INVALID_GROUP_ID, + RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE, + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED, + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART, + RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE, + RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED}; + + SUB_TEST_QUICK("request timeout %d", req_timeout_ms); + + test_conf_init(&conf, NULL, 60); + + mcluster = test_mock_cluster_new(1, &bootstraps); + + rd_kafka_mock_topic_create(mcluster, topic, 1, 1); + + test_conf_set(conf, "bootstrap.servers", bootstraps); + + rk = test_create_handle(RD_KAFKA_CONSUMER, conf); + + q = rd_kafka_queue_get_main(rk); + + if (req_timeout_ms > 0) { + /* Admin options */ + options = rd_kafka_AdminOptions_new( + rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS); + TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout( + options, req_timeout_ms, errstr, sizeof(errstr))); + } + + + for (i = 0; i < TEST_ERR_SIZE; i++) { + /* Offsets to alter */ + to_alter = rd_kafka_topic_partition_list_new(1); + rd_kafka_topic_partition_list_add(to_alter, topic, 0)->offset = + 3; + cgoffsets = + rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter); + + TEST_SAY("Call AlterConsumerGroupOffsets, err %s\n", + rd_kafka_err2name(errs[i])); + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_OffsetCommit, 1, errs[i]); + rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options, + q); + + rd_kafka_topic_partition_list_destroy(to_alter); + rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets); + + TEST_SAY("AlterConsumerGroupOffsets.queue_poll, err %s\n", + rd_kafka_err2name(errs[i])); + /* Poll result queue for AlterConsumerGroupOffsets result. + * Print but otherwise ignore other event types + * (typically generic Error events). 
*/ + while (1) { + rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000)); + TEST_SAY("AlterConsumerGroupOffsets: got %s\n", + rd_kafka_event_name(rkev)); + if (rkev == NULL) + continue; + if (rd_kafka_event_error(rkev)) + TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev), + rd_kafka_event_error_string(rkev)); + + if (rd_kafka_event_type(rkev) == + RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT) + break; + + rd_kafka_event_destroy(rkev); + } + + /* Convert event to proper result */ + res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev); + TEST_ASSERT(res, + "expected AlterConsumerGroupOffsets_result, not %s", + rd_kafka_event_name(rkev)); + + gres = rd_kafka_AlterConsumerGroupOffsets_result_groups( + res, &gres_cnt); + TEST_ASSERT(gres && gres_cnt == 1, + "expected gres_cnt == 1, not %" PRIusz, gres_cnt); + + partitions = rd_kafka_group_result_partitions(gres[0]); + + /* Verify expected errors */ + for (j = 0; j < partitions->cnt; j++) { + rd_kafka_topic_partition_t *rktpar = + &partitions->elems[j]; + TEST_ASSERT_LATER(rktpar->err == errs[i], + "Result %s [%" PRId32 + "] has error %s, " + "expected %s", + topic, 0, + rd_kafka_err2name(rktpar->err), + rd_kafka_err2name(errs[i])); + } + + rd_kafka_event_destroy(rkev); + } + if (options) + rd_kafka_AdminOptions_destroy(options); + + rd_kafka_queue_destroy(q); + + rd_kafka_destroy(rk); + + test_mock_cluster_destroy(mcluster); + + TEST_LATER_CHECK(); + + SUB_TEST_PASS(); + +#undef TEST_ERR_SIZE +} + +int main_0138_admin_mock(int argc, char **argv) { + + if (test_needs_auth()) { + TEST_SKIP("Mock cluster does not support SSL/SASL\n"); + return 0; + } + + do_test_AlterConsumerGroupOffsets_errors(-1); + do_test_AlterConsumerGroupOffsets_errors(1000); + + return 0; +} diff -Nru librdkafka-1.9.2/tests/CMakeLists.txt librdkafka-2.0.2/tests/CMakeLists.txt --- librdkafka-1.9.2/tests/CMakeLists.txt 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/CMakeLists.txt 2023-01-20 09:14:36.000000000 +0000 @@ -123,6 +123,12 @@ 0130-store_offsets.c 0131-connect_timeout.c 0132-strategy_ordering.c + 0133-ssl_keys.c + 0134-ssl_provider.c + 0135-sasl_credentials.cpp + 0136-resolve_cb.c + 0137-barrier_batch_consume.c + 0138-admin_mock.c 8000-idle.cpp test.c testcpp.cpp diff -Nru librdkafka-1.9.2/tests/fixtures/ssl/client2.certificate.pem librdkafka-2.0.2/tests/fixtures/ssl/client2.certificate.pem --- librdkafka-1.9.2/tests/fixtures/ssl/client2.certificate.pem 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/fixtures/ssl/client2.certificate.pem 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,109 @@ +Bag Attributes + friendlyName: client2 + localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32 +Key Attributes: +-----BEGIN PRIVATE KEY----- +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDMrI+QK7Q6L9TU +cVjEbl4sMu3KhXgs71JNgQl8joFPVjb3PZF6YHegZo0FAOU1F6lysD3NNnI21HIz +LbCe6BJRogNFKtcFvWS6uQok1HperDO/DVQkH9ARAcvlxE/I6dPbb1YCi7EMHrjM +Dle+NXWV3nKCe7BcMkETkki5Bj5fNA5oa/pmS0gSS/HXnB8rxyFv4mB/R+oGC1wO +WOvgn6ip5bKdjMEEnyqYsDCH8w3xYkKlZ6Ag5w1yxnr6D41J64Go2R62MuLrScVr ++4CM+XJl3Y08+emlCz5m5wuh6A31bp7MFY+f3Gs9AI5qiN3tyjZ//EzoIrfb68tQ +td+UvT4fAgMBAAECggEALoLkWQHlgfeOqPxdDL57/hVQvl4YUjXMgTpamoiT0CCq +ewLtxV6YsMW9NC7g53DKG/r7AGBoEhezH/g5E9NvHkfv8E7s8Cv68QfNy1LRwCPn +2nm/7jmggczjtgInk2O3tj0V0ZxHDpcIra5wuBPT9cvIP+i1yi3NZhIvHoTRtbZp +lWelovML6SGcbmYDZHWwL8C/quX2/Vp72dJa7ySatlJCe8lcdolazUAhe6W3FGf2 +DojupWddAbwcogQsjQ0WNgtIov5JDF1vHjLkw0uCvh24P+DYBA0JjHybLTR70Ypp +POwCV5O96JntWfcXYivi4LQrSDFCIDyDwwrbkIkdoQKBgQDuNesfC7C0LJikB+I1 
+UgrDJiu4lFVoXwbaWRRuZD58j0mDGeTY9gZzBJ7pJgv3qJbfk1iwpUU25R2Np946 +h63EqpSSoP/TnMBePUBjnu+C5iXxk2KPjNb9Xu8m4Q8tgYvYf5IJ7iLllY2uiT6B +e+0EGAEPvP1HLbPP22IUMsG6jwKBgQDb9X6fHMeHtP6Du+qhqiMmLK6R2lB7cQ1j +2FSDySekabucaFhDpK3n2klw2MfF2oZHMrxAfYFySV1kGMil4dvFox8mGBJHc/d5 +lNXGNOfQbVV8P1NRjaPwjyAAgAPZfZgFr+6s+pawMRGnGw5Y6p03sLnD5FWU9Wfa +vM6RLE5LcQJ/FHiNvB1FEjbC51XGGs7yHdMp7rLQpCeGbz04hEQZGps1tg6DnCGI +bFn5Tg/291GFpbED7ipFyHHoGERU1LLUPBJssi0jzwupfG/HGMiPzK/6ksgXsD5q +O1vtMWol48M+QVy1MCVG2nP/uQASXw5HUBLABJo5KeTDjxlLVHEINQKBgAe54c64 +9hFAPEhoS1+OWFm47BDXeEg9ulitepp+cFQIGrzttVv65tjkA/xgwPOkL19E2vPw +9KENDqi7biDVhCC3EBsIcWvtGN4+ahviM9pQXNZWaxjMPtvuSxN5a6kyDir0+Q8+ +ZhieQJ58Bs78vrT8EipdVNw8mn9GboMO6VkhAoGBAJ+NUvcO3nIVJOCEG3qnweHA +zqa4JyxFonljwsUFKCIHoiKYlp0KW4wTJJIkTKvLYcRY6kMzP/H1Ja9GqdVnf8ou +tJOe793M+HkYUMTxscYGoCXXtsWKN2ZOv8aVBA7RvpJS8gE6ApScUrjeM76h20CS +xxqrrSc37NSjuiaTyOTG +-----END PRIVATE KEY----- +Bag Attributes + friendlyName: client2 + localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32 +subject=C = , ST = , L = , O = , OU = , CN = client2 + +issuer=CN = caroot + +-----BEGIN CERTIFICATE----- +MIIDCzCCAfOgAwIBAgIUIRg5w7eGA6xivHxzAmzh2PLUJq8wDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGY2Fyb290MCAXDTIyMTAwNzE1MTI0NFoYDzIwNTAwMjIx +MTUxMjQ0WjBJMQkwBwYDVQQGEwAxCTAHBgNVBAgTADEJMAcGA1UEBxMAMQkwBwYD +VQQKEwAxCTAHBgNVBAsTADEQMA4GA1UEAxMHY2xpZW50MjCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAMysj5ArtDov1NRxWMRuXiwy7cqFeCzvUk2BCXyO +gU9WNvc9kXpgd6BmjQUA5TUXqXKwPc02cjbUcjMtsJ7oElGiA0Uq1wW9ZLq5CiTU +el6sM78NVCQf0BEBy+XET8jp09tvVgKLsQweuMwOV741dZXecoJ7sFwyQROSSLkG +Pl80Dmhr+mZLSBJL8decHyvHIW/iYH9H6gYLXA5Y6+CfqKnlsp2MwQSfKpiwMIfz +DfFiQqVnoCDnDXLGevoPjUnrgajZHrYy4utJxWv7gIz5cmXdjTz56aULPmbnC6Ho +DfVunswVj5/caz0AjmqI3e3KNn/8TOgit9vry1C135S9Ph8CAwEAAaMhMB8wHQYD +VR0RBBYwFIIHY2xpZW50MoIJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBd +d5Sl51/aLcCnc5vo2h2fyNQIVbZGbgEyWRbYdHv5a4X7JxUalipvRhXTpYLQ+0R5 +Fzgl5Mwo6dUpJjtzwXZUOAt59WhqVV5+TMe8eDHBl+lKM/YUgZ+kOlGMExEaygrh +cG+/rVZLAgcC+HnHNaIo2guyn6RqFtBMzkRmjhH96AcygbsN5OFHY0NOzGV9WTDJ ++A9dlJIy2bEU/yYpXerdXp9lM8fKaPc0JDYwwESMS7ND70dcpGmrRa9pSTSDPUaK +KSzzOyK+8E5mzcqEbUCrlpz0sklNYDNMIn48Qjkz52Kv8XHvcYS1gv0XvQZtIH3M +x6X3/J+ivx6L72BOm+ar +-----END CERTIFICATE----- +Bag Attributes + friendlyName: CN=caroot +subject=CN = caroot + +issuer=CN = caroot + +-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1 +MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj +FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk +daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA +xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4 +B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m +bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH +18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N +L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF +UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn +KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc +MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK +0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7 +MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e +QHIFE8+PTQ== +-----END CERTIFICATE----- +Bag Attributes + friendlyName: caroot + 2.16.840.1.113894.746875.1.1: +subject=CN = caroot + +issuer=CN = caroot 
+ +-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1 +MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj +FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk +daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA +xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4 +B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m +bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH +18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N +L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF +UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn +KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc +MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK +0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7 +MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e +QHIFE8+PTQ== +-----END CERTIFICATE----- diff -Nru librdkafka-1.9.2/tests/fixtures/ssl/client2.key librdkafka-2.0.2/tests/fixtures/ssl/client2.key --- librdkafka-1.9.2/tests/fixtures/ssl/client2.key 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/fixtures/ssl/client2.key 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,34 @@ +Bag Attributes + friendlyName: client2 + localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32 +Key Attributes: +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFFDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQILalIN2MbG7QCAggA +MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECD+gqk7gSkEFBIIEwETSFzC1yYTM +/O6lA8BMkl5Wzt4e7Jw7WnfWSmOFTtpXZqOgxvN9dNPsMIpxvU7nF3Iwhqw0WXMF +lpKqCy2FLM+XWqaQYV+2++s23lH0Eqfofc0IZoYk7FB92MAO1dUI7iDJeT0kwrmU +mgAKAqa6e4REZgDEUXYVAOiAHqszs0JjXlsxlPSws2EZQyU8kEALggy+60Jozviq +a9fUZ9JnbtCPkuSOipC8N+erNIEkruzbXRbookTQF+qAyTyXMciL0fTqdAJB/xfO +h66TQvr1XZorqqVPYI+yXwRBF7oVfJyk0kVfhcpo6SoedNJ3onUlyktcF2RPj1xh +612L4ytNp/TN8jvSs5EKHTuwS2+dnYp2jTS4rcbSRe53RylhFudAn9/aZad0/C72 +JXeiax3i0071sWbvKX3YsW/2QCaeMALhiqbzx+8PcgVV9BVfjO8qxJSNjaOwmVRy +I/22pufTDkoNL/aQSiw1NAL22IPdD0uvLCHj27nBct4KancvgSdTxMK9lfwJZet1 +D0S9ChUa2tCY0pDH7F9XUfcS7VAij+VWtlGIyEw7rPOWx6fGT15fj/QnepuJ5xON +qiAH7IhJesWWhG7xp7c3QsdeGNowkMtoLBlz5fEKDRaauPlbLI5IoXy+ZyOO1tIo +kH5wHDE1bn5cWn7qRy5X5HtPga1OjF11R+XquJ88+6gqmxPlsrK45/FiGdP4iLN/ +dp10cnFgAVA2kEaTXCH1LctGlR+3XQgfrwWDfvk7uMtvybqFcEEBv8vBih1UsF6v +RFfoUYq8Zle2x9kX/cfad52FxtDWnhZAgNtT53tWRUb/oAt7fXQxJMlRXKjSV05q +S/uwevnj49eVFdyiroPofipB8LAK4I+gzZ8AYJob5GoRTlPonC1pj/n3vKRsDMOA +Lwy3gXoyQ+/MBUPcDG/ewdusrJncnkAlFNt0w97CmOJU0czuJJw5rRozfvZF1Hs9 +2BVcwVPmZH9Nr3+6Yb+GTCRvsM7DBuLZIEN4WzjoLYAcrjZ2XYLsC6XmnDzIp1HF +nZwrXUROp4MhKuy+SIdFqZLoU/+AIB28WI3euIDDuERSZLff11hphRG5S9wZ8EJH +Jyl2WgP4r8wQtHs71iT06KDFuBcNqGYPwCjnvE86WFXE3wOJ91+l9u8MYvOSVOHq +4iUIpRFD4hlCWOIc1V9QYKf2s8Vkeoop/pUutK5NpLtMFgJpFPNYxyfBL13fo9lM +0iVuoG3W+iDjqZyUPoDxG4rI6Q9WvkswLxVwpMgzDUbUl2aKHcm4Z215dBMm40zh +ft+QzZEnMVzln2eTCcH91IXcsyPPACmKwraAik5ULEn4m++KtdwDZ6R1zzgRJrn9 +FI6L7C0nfKKemBdzGMCzQuciuPLIjfzXHdKr5bb0C1WS88IB0lYIs+pzpvms2P0F +AQ2nDgFKA9xlzX2f1O/YQNKA1ctc8RH5tpZUUVfheIqd0U4udp9Rqecd+/r23ENU +7kjeuxXfUbH83P0hrsQQFkkOeRWWz8+UYvqIEwWaSObdZCvTdIjRpNmmamWsAmsJ +D5Q2AMMMmNwIi5fUKYJgwTfsgY0XIekk6wmugKs3gCj1RKX930b9fniiol/Gv2VS +fJRrqds7F0s= +-----END ENCRYPTED PRIVATE KEY----- Binary files /tmp/tmpvzcbfjt5/kF_uY0gNKH/librdkafka-1.9.2/tests/fixtures/ssl/client.keystore.p12 and 
/tmp/tmpvzcbfjt5/S9p_UAzdTZ/librdkafka-2.0.2/tests/fixtures/ssl/client.keystore.p12 differ diff -Nru librdkafka-1.9.2/tests/fixtures/ssl/create_keys.sh librdkafka-2.0.2/tests/fixtures/ssl/create_keys.sh --- librdkafka-1.9.2/tests/fixtures/ssl/create_keys.sh 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/fixtures/ssl/create_keys.sh 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,93 @@ +#!/bin/sh +set -e +CA_PASSWORD="${CA_PASSWORD:-use_strong_password_ca}" +KEYSTORE_PASSWORD="${KEYSTORE_PASSWORD:-use_strong_password_keystore}" +TRUSTSTORE_PASSWORD="${TRUSTSTORE_PASSWORD:-use_strong_password_truststore}" +OUTPUT_FOLDER=${OUTPUT_FOLDER:-$( dirname "$0" )} +CNS=${@:-client} + +cd ${OUTPUT_FOLDER} +CA_ROOT_KEY=caroot.key +CA_ROOT_CRT=caroot.crt + +echo "# Generate CA" +openssl req -new -x509 -keyout $CA_ROOT_KEY \ + -out $CA_ROOT_CRT -days 3650 -subj \ + '/CN=caroot/OU=/O=/L=/ST=/C=' -passin "pass:${CA_PASSWORD}" \ + -passout "pass:${CA_PASSWORD}" + +for CN in $CNS; do + KEYSTORE=$CN.keystore.p12 + TRUSTSTORE=$CN.truststore.p12 + SIGNED_CRT=$CN-ca-signed.crt + CERTIFICATE=$CN.certificate.pem + KEY=$CN.key + # Get specific password for this CN + CN_KEYSTORE_PASSWORD="$(eval echo \$${CN}_KEYSTORE_PASSWORD)" + if [ -z "$CN_KEYSTORE_PASSWORD" ]; then + CN_KEYSTORE_PASSWORD=${KEYSTORE_PASSWORD}_$CN + fi + + echo ${CN_KEYSTORE_PASSWORD} + + echo "# $CN: Generate Keystore" + keytool -genkey -noprompt \ + -alias $CN \ + -dname "CN=$CN,OU=,O=,L=,S=,C=" \ + -ext "SAN=dns:$CN,dns:localhost" \ + -keystore $KEYSTORE \ + -keyalg RSA \ + -storepass "${CN_KEYSTORE_PASSWORD}" \ + -storetype pkcs12 + + echo "# $CN: Generate Truststore" + keytool -noprompt -keystore \ + $TRUSTSTORE -alias caroot -import \ + -file $CA_ROOT_CRT -storepass "${TRUSTSTORE_PASSWORD}" + + echo "# $CN: Generate CSR" + keytool -keystore $KEYSTORE -alias $CN \ + -certreq -file $CN.csr -storepass "${CN_KEYSTORE_PASSWORD}" \ + -keypass "${CN_KEYSTORE_PASSWORD}" \ + -ext "SAN=dns:$CN,dns:localhost" + + echo "# $CN: Generate extfile" + cat << EOF > extfile +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no +[req_distinguished_name] +CN = $CN +[v3_req] +subjectAltName = @alt_names +[alt_names] +DNS.1 = $CN +DNS.2 = localhost +EOF + + echo "# $CN: Sign the certificate with the CA" + openssl x509 -req -CA $CA_ROOT_CRT -CAkey $CA_ROOT_KEY \ + -in $CN.csr \ + -out $CN-ca-signed.crt -days 9999 \ + -CAcreateserial -passin "pass:${CA_PASSWORD}" \ + -extensions v3_req -extfile extfile + + echo "# $CN: Import root certificate" + keytool -noprompt -keystore $KEYSTORE \ + -alias caroot -import -file $CA_ROOT_CRT -storepass "${CN_KEYSTORE_PASSWORD}" + + echo "# $CN: Import signed certificate" + keytool -noprompt -keystore $KEYSTORE -alias $CN \ + -import -file $SIGNED_CRT -storepass "${CN_KEYSTORE_PASSWORD}" \ + -ext "SAN=dns:$CN,dns:localhost" + + echo "# $CN: Export PEM certificate" + openssl pkcs12 -in "$KEYSTORE" -out "$CERTIFICATE" \ + -nodes -passin "pass:${CN_KEYSTORE_PASSWORD}" + + echo "# $CN: Export PEM key" + openssl pkcs12 -in "$KEYSTORE" -out "$KEY" \ + -nocerts -passin "pass:${CN_KEYSTORE_PASSWORD}" \ + -passout "pass:${CN_KEYSTORE_PASSWORD}" +done diff -Nru librdkafka-1.9.2/tests/fixtures/ssl/.gitignore librdkafka-2.0.2/tests/fixtures/ssl/.gitignore --- librdkafka-1.9.2/tests/fixtures/ssl/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/fixtures/ssl/.gitignore 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,11 @@ +*.key +*.crt +*.jks +*.csr +*.pem +*.p12 +*.srl 
+extfile +!client.keystore.p12 +!client2.certificate.pem +!client2.key diff -Nru librdkafka-1.9.2/tests/fixtures/ssl/Makefile librdkafka-2.0.2/tests/fixtures/ssl/Makefile --- librdkafka-1.9.2/tests/fixtures/ssl/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/fixtures/ssl/Makefile 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,8 @@ +ssl_keys: clear_keys + @./create_keys.sh client client2 + +clear_keys: + @rm -f *.key *.crt *.jks \ + *.csr *.pem *.p12 *.srl extfile + +.PHONY: ssl_keys diff -Nru librdkafka-1.9.2/tests/fixtures/ssl/README.md librdkafka-2.0.2/tests/fixtures/ssl/README.md --- librdkafka-1.9.2/tests/fixtures/ssl/README.md 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/tests/fixtures/ssl/README.md 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,13 @@ +# SSL keys generation for tests + +The Makefile in this directory generates a PKCS#12 keystore +and corresponding PEM certificate and key for testing +SSL keys and keystore usage in librdkafka. + +To update those files with a newer OpenSSL version, just run `make`. + +# Requirements + +* OpenSSL >= 1.1.1 +* Java keytool >= Java 11 +* GNU Make >= 4.2 \ No newline at end of file diff -Nru librdkafka-1.9.2/tests/interactive_broker_version.py librdkafka-2.0.2/tests/interactive_broker_version.py --- librdkafka-1.9.2/tests/interactive_broker_version.py 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/interactive_broker_version.py 2023-01-20 09:14:36.000000000 +0000 @@ -160,16 +160,16 @@ key['password']).encode('ascii')) for k, v in ssl.ca.items(): - cmd_env['RDK_SSL_ca_{}'.format(k)] = v + cmd_env['SSL_ca_{}'.format(k)] = v # Set envs for all generated keys so tests can find them. for k, v in key.items(): if isinstance(v, dict): for k2, v2 in v.items(): - # E.g. "RDK_SSL_priv_der=path/to/librdkafka-priv.der" - cmd_env['RDK_SSL_{}_{}'.format(k, k2)] = v2 + # E.g. "SSL_priv_der=path/to/librdkafka-priv.der" + cmd_env['SSL_{}_{}'.format(k, k2)] = v2 else: - cmd_env['RDK_SSL_{}'.format(k)] = v + cmd_env['SSL_{}'.format(k)] = v # Define bootstrap brokers based on selected security protocol print('# Using client security.protocol=%s' % security_protocol) diff -Nru librdkafka-1.9.2/tests/LibrdkafkaTestApp.py librdkafka-2.0.2/tests/LibrdkafkaTestApp.py --- librdkafka-1.9.2/tests/LibrdkafkaTestApp.py 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/LibrdkafkaTestApp.py 2023-01-20 09:14:36.000000000 +0000 @@ -139,16 +139,16 @@ # Some tests need fine-grained access to various cert files, # set up the env vars accordingly. for k, v in ssl.ca.items(): - self.env_add('RDK_SSL_ca_{}'.format(k), v) + self.env_add('SSL_ca_{}'.format(k), v) # Set envs for all generated keys so tests can find them. for k, v in key.items(): if isinstance(v, dict): for k2, v2 in v.items(): - # E.g. "RDK_SSL_priv_der=path/to/librdkafka-priv.der" - self.env_add('RDK_SSL_{}_{}'.format(k, k2), v2) + # E.g. "SSL_priv_der=path/to/librdkafka-priv.der" + self.env_add('SSL_{}_{}'.format(k, k2), v2) else: - self.env_add('RDK_SSL_{}'.format(k), v) + self.env_add('SSL_{}'.format(k), v) if 'SASL' in self.security_protocol: self.security_protocol = 'SASL_SSL' diff -Nru librdkafka-1.9.2/tests/README.md librdkafka-2.0.2/tests/README.md --- librdkafka-1.9.2/tests/README.md 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/README.md 2023-01-20 09:14:36.000000000 +0000 @@ -23,14 +23,19 @@ may be specified by setting the `TRIVUP_ROOT` environment variable to alternate directory, e.g., `TRIVUP_ROOT=$HOME/trivup make full`. 
-First install trivup: +First install required Python packages (trivup with friends): - $ pip3 install trivup + $ python3 -m pip install -U -r requirements.txt Bring up a Kafka cluster (with the specified version) and start an interactive shell, when the shell is exited the cluster is brought down and deleted. - $ ./interactive_broker_version.py 2.3.0 # Broker version + $ python3 -m trivup.clusters.KafkaCluster 2.3.0 # Broker version + # You can also try adding: + # --ssl To enable SSL listeners + # --sasl To enable SASL authentication + # --sr To provide a Schema-Registry instance + # .. and so on, see --help for more. In the trivup shell, run the test suite: diff -Nru librdkafka-1.9.2/tests/requirements.txt librdkafka-2.0.2/tests/requirements.txt --- librdkafka-1.9.2/tests/requirements.txt 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/requirements.txt 2023-01-20 09:14:36.000000000 +0000 @@ -1,2 +1,2 @@ -trivup +trivup >= 0.12.1 jsoncomment diff -Nru librdkafka-1.9.2/tests/run-test.sh librdkafka-2.0.2/tests/run-test.sh --- librdkafka-1.9.2/tests/run-test.sh 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/run-test.sh 2023-01-20 09:14:36.000000000 +0000 @@ -126,7 +126,7 @@ if [ $RET -gt 0 ]; then echo -e "${RED}###" - echo -e "### Test $TEST in $mode mode FAILED! ###" + echo -e "### Test $TEST in $mode mode FAILED! (return code $RET) ###" echo -e "###${CCLR}" FAILED=1 else diff -Nru librdkafka-1.9.2/tests/test.c librdkafka-2.0.2/tests/test.c --- librdkafka-1.9.2/tests/test.c 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/test.c 2023-01-20 09:14:36.000000000 +0000 @@ -240,6 +240,12 @@ _TEST_DECL(0130_store_offsets); _TEST_DECL(0131_connect_timeout); _TEST_DECL(0132_strategy_ordering); +_TEST_DECL(0133_ssl_keys); +_TEST_DECL(0134_ssl_provider); +_TEST_DECL(0135_sasl_credentials); +_TEST_DECL(0136_resolve_cb); +_TEST_DECL(0137_barrier_batch_consume); +_TEST_DECL(0138_admin_mock); /* Manual tests */ _TEST_DECL(8000_idle); @@ -478,6 +484,12 @@ _TEST(0130_store_offsets, 0), _TEST(0131_connect_timeout, TEST_F_LOCAL), _TEST(0132_strategy_ordering, 0, TEST_BRKVER(2, 4, 0, 0)), + _TEST(0133_ssl_keys, TEST_F_LOCAL), + _TEST(0134_ssl_provider, TEST_F_LOCAL), + _TEST(0135_sasl_credentials, 0), + _TEST(0136_resolve_cb, TEST_F_LOCAL), + _TEST(0137_barrier_batch_consume, 0), + _TEST(0138_admin_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)), /* Manual tests */ _TEST(8000_idle, TEST_F_MANUAL), @@ -1393,15 +1405,18 @@ #else sql_fp = popen(test_sql_cmd, "w"); #endif - - fprintf(sql_fp, - "CREATE TABLE IF NOT EXISTS " - "runs(runid text PRIMARY KEY, mode text, " - "date datetime, cnt int, passed int, failed int, " - "duration numeric);\n" - "CREATE TABLE IF NOT EXISTS " - "tests(runid text, mode text, name text, state text, " - "extra text, duration numeric);\n"); + if (!sql_fp) + TEST_WARN("Failed to execute test.sql.command: %s", + test_sql_cmd); + else + fprintf(sql_fp, + "CREATE TABLE IF NOT EXISTS " + "runs(runid text PRIMARY KEY, mode text, " + "date datetime, cnt int, passed int, " + "failed int, duration numeric);\n" + "CREATE TABLE IF NOT EXISTS " + "tests(runid text, mode text, name text, " + "state text, extra text, duration numeric);\n"); } if (show_summary) @@ -4459,6 +4474,35 @@ return 0; } +/** + * @brief Compare two lists and their offsets, returning 0 if equal. + * + * @remark The lists may be sorted by this function. 
+ */ +int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl) { + int i; + + if (al->cnt < bl->cnt) + return -1; + else if (al->cnt > bl->cnt) + return 1; + else if (al->cnt == 0) + return 0; + + rd_kafka_topic_partition_list_sort(al, NULL, NULL); + rd_kafka_topic_partition_list_sort(bl, NULL, NULL); + + for (i = 0; i < al->cnt; i++) { + const rd_kafka_topic_partition_t *a = &al->elems[i]; + const rd_kafka_topic_partition_t *b = &bl->elems[i]; + if (a->partition != b->partition || + strcmp(a->topic, b->topic) || a->offset != b->offset) + return -1; + } + + return 0; +} /** * @brief Execute script from the Kafka distribution bin/ path. @@ -5636,9 +5680,13 @@ size_t aclres_cnt = 0; int errcnt = 0; rd_kafka_resp_err_t err; - const rd_kafka_group_result_t **gres = NULL; - size_t gres_cnt = 0; - const rd_kafka_topic_partition_list_t *offsets = NULL; + const rd_kafka_group_result_t **gres = NULL; + size_t gres_cnt = 0; + const rd_kafka_ConsumerGroupDescription_t **gdescs = NULL; + size_t gdescs_cnt = 0; + const rd_kafka_error_t **glists_errors = NULL; + size_t glists_error_cnt = 0; + const rd_kafka_topic_partition_list_t *offsets = NULL; rkev = test_wait_admin_result(q, evtype, tmout); @@ -5700,7 +5748,24 @@ rd_kafka_event_name(rkev)); aclres = rd_kafka_CreateAcls_result_acls(res, &aclres_cnt); + } else if (evtype == RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) { + const rd_kafka_ListConsumerGroups_result_t *res; + if (!(res = rd_kafka_event_ListConsumerGroups_result(rkev))) + TEST_FAIL( + "Expected a ListConsumerGroups result, not %s", + rd_kafka_event_name(rkev)); + glists_errors = rd_kafka_ListConsumerGroups_result_errors( + res, &glists_error_cnt); + } else if (evtype == RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) { + const rd_kafka_DescribeConsumerGroups_result_t *res; + if (!(res = rd_kafka_event_DescribeConsumerGroups_result(rkev))) + TEST_FAIL( + "Expected a DescribeConsumerGroups result, not %s", + rd_kafka_event_name(rkev)); + + gdescs = rd_kafka_DescribeConsumerGroups_result_groups( + res, &gdescs_cnt); } else if (evtype == RD_KAFKA_EVENT_DELETEGROUPS_RESULT) { const rd_kafka_DeleteGroups_result_t *res; if (!(res = rd_kafka_event_DeleteGroups_result(rkev))) @@ -5771,6 +5836,30 @@ } } + /* Check list groups errors */ + for (i = 0; i < glists_error_cnt; i++) { + const rd_kafka_error_t *error = glists_errors[i]; + TEST_WARN("%s error: %s\n", rd_kafka_event_name(rkev), + rd_kafka_error_string(error)); + if (!(errcnt++)) + err = rd_kafka_error_code(error); + } + + /* Check describe groups errors */ + for (i = 0; i < gdescs_cnt; i++) { + const rd_kafka_error_t *error; + if ((error = + rd_kafka_ConsumerGroupDescription_error(gdescs[i]))) { + TEST_WARN("%s result: %s: error: %s\n", + rd_kafka_event_name(rkev), + rd_kafka_ConsumerGroupDescription_group_id( + gdescs[i]), + rd_kafka_error_string(error)); + if (!(errcnt++)) + err = rd_kafka_error_code(error); + } + } + /* Check group errors */ for (i = 0; i < gres_cnt; i++) { const rd_kafka_topic_partition_list_t *parts; @@ -6064,7 +6153,7 @@ TEST_SAY("Deleting %" PRIusz " groups\n", group_cnt); - rd_kafka_DeleteGroups(rk, del_groups, group_cnt, options, useq); + rd_kafka_DeleteGroups(rk, del_groups, group_cnt, options, q); rd_kafka_AdminOptions_destroy(options); @@ -6079,8 +6168,6 @@ rd_kafka_queue_destroy(q); - rd_kafka_DeleteGroup_destroy_array(del_groups, group_cnt); - if (err) TEST_FAIL("Failed to delete groups: %s", rd_kafka_err2str(err)); @@ -6712,6 +6799,8 @@ return 0; } + 
test_curr->subtest_quick = is_quick; + TIMING_START(&test_curr->subtest_duration, "SUBTEST"); TEST_SAY(_C_MAG "[ %s ]\n", test_curr->subtest); @@ -6744,6 +6833,16 @@ (float)(TIMING_DURATION(&test_curr->subtest_duration) / 1000000.0f)); + if (test_curr->subtest_quick && test_quick && !test_on_ci && + TIMING_DURATION(&test_curr->subtest_duration) > 45 * 1000 * 1000) + TEST_WARN( + "Subtest %s marked as QUICK but took %.02fs to " + "finish: either fix the test or " + "remove the _QUICK identifier (limit is 45s)\n", + test_curr->subtest, + (float)(TIMING_DURATION(&test_curr->subtest_duration) / + 1000000.0f)); + test_sub_reset(); } diff -Nru librdkafka-1.9.2/tests/test.h librdkafka-2.0.2/tests/test.h --- librdkafka-1.9.2/tests/test.h 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/tests/test.h 2023-01-20 09:14:36.000000000 +0000 @@ -156,6 +156,7 @@ char failstr[512 + 1]; /**< First test failure reason */ char subtest[400]; /**< Current subtest, if any */ test_timing_t subtest_duration; /**< Subtest duration timing */ + rd_bool_t subtest_quick; /**< Subtest is marked as QUICK */ #if WITH_SOCKEM rd_list_t sockets; @@ -680,6 +681,8 @@ const rd_kafka_topic_partition_list_t *partitions); int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al, rd_kafka_topic_partition_list_t *bl); +int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al, + rd_kafka_topic_partition_list_t *bl); void test_kafka_topics(const char *fmt, ...); void test_admin_create_topic(rd_kafka_t *use_rk, diff -Nru librdkafka-1.9.2/.travis.yml librdkafka-2.0.2/.travis.yml --- librdkafka-1.9.2/.travis.yml 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,170 +0,0 @@ -language: c -dist: xenial -cache: ccache - -addons: - apt: - packages: - - python3 - - python3-pip - - python3-setuptools - # required by openssl installer - - perl - -env: - global: - - secure: "q7DQ6KCiQyMEpBf8mxPFl6hY9JEoaOUdIaLh1IuYn5TctiNIA+J6O/bL/dyDSy2Yjor61WAiiMOh77eMykm1wPl72kqjR97ui0uCq7BQQn4MWtKrXXi0eWLF3bYt2FbUGJZvrM0xeoWzSYT6np7CKu8ssgL8Fvr4bmf152IpdQ8=" - - secure: "XpFExynXwbSr6vTuGsZVyqF4sti+UmRxX2sztjpTdaIH0yo60d6KYT0SRW7BLdZNA6/XI1l1GPTAwcDwTM1XasnnFrD7i88uZsAneA/xEgZTGXtnVVWPJAcVoX/75Rxeibc8CfSc5MO9QmBMiGGuI3S6HHCj4RzCJacBhOjIhfA=" - -matrix: - include: - - name: "Linux GCC: +Debian packages +BuiltinRegex +Strict" - if: tag IS present - os: linux - compiler: gcc - env: - - ADDITIONAL_BUILDS="debian" - - ADDITIONAL_BUILD_FLAGS="--source-deps-only" - - LINKAGE=std - before_script: - - ./configure --install-deps --disable-lz4-ext --disable-regex-ext --prefix="$PWD/dest" --enable-strip - - - name: "RPM packages" - if: tag IS present - os: linux - compiler: gcc - env: ADDITIONAL_BUILDS="centos" SKIP_MAKE=y - - - name: "Linux clang: +alpine +manylinux +werror" - os: linux - compiler: clang - env: ADDITIONAL_BUILDS="alpine manylinux2010_x86_64" ADDITIONAL_BUILD_FLAGS="--source-deps-only" LINKAGE=std - before_script: - - ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-werror --enable-strip - - - name: "Linux clang: +static +alpine-static -gssapi" - if: tag IS present - os: linux - compiler: clang - env: ADDITIONAL_BUILDS="alpine-static" LINKAGE=static - before_script: - - ./configure --enable-static --install-deps --source-deps-only --disable-gssapi --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - - name: "OSX GCC" - if: tag IS PRESENT - os: osx - compiler: gcc - env: LINKAGE=std HOMEBREW_NO_AUTO_UPDATE=1 - before_script: - - 
./configure --install-deps --source-deps-only --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - - name: "OSX clang: +static" - if: tag IS PRESENT - os: osx - compiler: clang - env: LINKAGE=static HOMEBREW_NO_AUTO_UPDATE=1 - before_script: - - ./configure --install-deps --source-deps-only --disable-lz4-ext --prefix="$PWD/dest" --enable-static --enable-strip - - - name: "Windows MinGW-w64 Dynamic" - if: tag IS PRESENT - os: windows - env: - - LINKAGE=std - - SKIP_MAKE=y - before_install: - - source ./packaging/mingw-w64/travis-before-install.sh - before_script: - - ./packaging/mingw-w64/configure-build-msys2-mingw.sh - - - name: "Windows MinGW-w64 Static" - os: windows - env: - - LINKAGE=static - - SKIP_MAKE=y - before_install: - - source ./packaging/mingw-w64/travis-before-install.sh - before_script: - - ./packaging/mingw-w64/configure-build-msys2-mingw-static.sh - - ./packaging/mingw-w64/run-tests.sh - - - name: "Linux GCC: +integration-tests +copyright-check +doc-check +devel +code-cov +c99 +c++98" - os: linux - dist: xenial - language: python - python: 3.8 - compiler: gcc - env: NO_ARTIFACTS=y RUN_INTEGRATION_TESTS=y COPYRIGHT_CHECK=y DOC_CHECK=y - before_script: - - wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb - - sudo dpkg -i rapidjson-dev.deb - - python -m pip install -U pip - - python -m pip -V - - python -m pip install -r tests/requirements.txt - - sudo apt update - - sudo apt install -y doxygen graphviz gdb - - ./configure --CFLAGS="-std=c99" --CXXFLAGS="-std=c++98" --install-deps --enable-devel --disable-lz4-ext --prefix="$PWD/dest" - - ./packaging/tools/rdutcoverage.sh - - - name: "Linux GCC arm64: +static -gssapi" - os: linux - arch: arm64 - dist: bionic - compiler: gcc - env: LINKAGE=std - before_script: - - ./configure --disable-gssapi --install-deps --source-deps-only --enable-static --disable-lz4-ext --prefix="$PWD/dest" --enable-strip - - - name: "Linux GCC s390x: +devel" - if: tag IS PRESENT - os: linux - arch: s390x - dist: bionic - compiler: gcc - env: NO_ARTIFACTS=y - before_script: - - sudo apt install -y gdb - - ./configure --enable-devel --disable-lz4-ext --prefix="$PWD/dest" - -install: - - ccache -s || echo "CCache is not available." - - rm -rf artifacts dest - - mkdir dest artifacts - - if [[ $TRAVIS_OS_NAME == "linux" ]]; then sudo apt update || true; fi - - if [[ $TRAVIS_DIST == "trusty" || $TRAVIS_DIST == "xenial" ]]; then sudo apt-get install -y libssl1.0.0 libssl-dev ; fi - - if [[ $TRAVIS_DIST == "bionic" || $TRAVIS_DIST == "focal" ]]; then sudo apt-get install -y libssl1.1 libssl-dev ; fi - - if [[ -n $DOCKER_PASSWORD && $TRAVIS_OS_NAME == "linux" ]]; then echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin ; fi - -before_cache: - - if [[ $TRAVIS_OS_NAME == windows ]]; then $msys2 pacman --sync --clean --noconfirm ; fi - -script: -- if [[ $SKIP_MAKE != y ]]; then (make -j2 all examples check && make -j2 -C tests build) || travis_terminate 1 ; fi -# Tag: Run quick local test suite on -# No tag: Run unit tests. -- if [[ $SKIP_MAKE != y && $RUN_INTEGRATION_TESTS != y ]]; then if [[ -n $TRAVIS_TAG ]]; then make -C tests run_local_quick; else make -C tests unit ; fi ; fi -- if [[ $SKIP_MAKE != y ]]; then make install || travis_terminate 1 ; fi -- if [[ -z $NO_ARTIFACTS ]]; then (cd dest && tar cvzf ../artifacts/librdkafka-${CC}.tar.gz .) 
; fi -- if [[ -n $TRAVIS_TAG ]]; then for distro in $ADDITIONAL_BUILDS ; do packaging/tools/distro-build.sh $distro $ADDITIONAL_BUILD_FLAGS --enable-strip || travis_terminate 1 ; done ; fi -- if [[ $COPYRIGHT_CHECK == y ]]; then make copyright-check || travis_terminate 1; fi -- if [[ $DOC_CHECK == y ]]; then make docs || travis_terminate 1 ; fi -- if [[ -z $TRAVIS_TAG && $RUN_INTEGRATION_TESTS == y ]]; then (cd tests && travis_retry ./interactive_broker_version.py -c "make quick" 2.8.1) || travis_terminate 1 ; fi -- if [[ -f tests/core ]] && (which gdb >/dev/null); then (cd tests && LD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner core < backtrace.gdb) ; fi -- sha256sum artifacts/* || true - -deploy: - provider: s3 - access_key_id: - secure: "sRsKY1YoPDb3b+9hHnBv4tDSdyB/FraYEKI1/+aKmqWxvOI6xYYFFP0Tvn6f4Rgk0wzYmxO/5V+cR+fmKxVhb1pItFXOdVqML0ilOTP5gtlOPUeHu9fytqw3q7GgMV8JR75g60BNVko9vZegtd2LIq6FWzAIvPSUJOAw7qekjGU=" - secret_access_key: - secure: "ZDjH6Z9CJr2yo7Splm+0xpo30QbO+cpeqxFUn1d9XOyLZQ0dapr6iboxdPlJaCOIhqVUWXS0IJgFwCW+5vWb9Za6tFumP1MtJGiwE6bqr820G8E02umwSvbNijr44h+EyxQcxP71Ljjk22Pfu7SLKWqMJ/iIzcYe6Z6Sz8obSWA=" - bucket: librdkafka-ci-packages - region: us-west-1 - skip_cleanup: true - local-dir: artifacts - upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${TRAVIS_CPU_ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER}__lnk-${LINKAGE} - on: - repo: edenhill/librdkafka - all_branches: true - tags: true - condition: $NO_ARTIFACTS != y diff -Nru librdkafka-1.9.2/vcpkg.json librdkafka-2.0.2/vcpkg.json --- librdkafka-1.9.2/vcpkg.json 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/vcpkg.json 2023-01-20 09:14:36.000000000 +0000 @@ -1,25 +1,23 @@ { "name": "librdkafka", - "version": "1.9.2", + "version": "2.0.2", "dependencies": [ { "name": "zstd", - "version>=": "1.5.0" + "version>=": "1.5.2" }, { "name": "zlib", - "version>=": "1.2.12" + "version>=": "1.2.13" }, { - "name": "openssl" + "name": "openssl", + "version>=": "3.0.7" }, { "name": "curl", - "version>=": "7.84.0" + "version>=": "7.86.0" } ], - "overrides": [ - { "name": "openssl", "version-string": "1.1.1n" } - ], - "builtin-baseline": "de176433e9a8769eed0e43d61758f4cdc1dc6e20" + "builtin-baseline": "56765209ec0e92c58a5fd91aa09c46a16d660026" } diff -Nru librdkafka-1.9.2/win32/interceptor_test/interceptor_test.vcxproj librdkafka-2.0.2/win32/interceptor_test/interceptor_test.vcxproj --- librdkafka-1.9.2/win32/interceptor_test/interceptor_test.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/interceptor_test/interceptor_test.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -5,7 +5,7 @@ Win32Proj interceptor_test interceptor_test - 8.1 + 10.0 DynamicLibrary @@ -84,4 +84,4 @@ - \ No newline at end of file + diff -Nru librdkafka-1.9.2/win32/librdkafka.autopkg.template librdkafka-2.0.2/win32/librdkafka.autopkg.template --- librdkafka-1.9.2/win32/librdkafka.autopkg.template 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/librdkafka.autopkg.template 2023-01-20 09:14:36.000000000 +0000 @@ -25,7 +25,7 @@ summary: "The Apache Kafka C/C++ client library"; description:"The Apache Kafka C/C++ client library"; releaseNotes: "Release of librdkafka"; - copyright: "Copyright 2012-2021"; + copyright: "Copyright 2012-2022"; tags: { native, kafka, librdkafka, C, C++ }; }; diff -Nru librdkafka-1.9.2/win32/librdkafkacpp/librdkafkacpp.vcxproj librdkafka-2.0.2/win32/librdkafkacpp/librdkafkacpp.vcxproj --- 
librdkafka-1.9.2/win32/librdkafkacpp/librdkafkacpp.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/librdkafkacpp/librdkafkacpp.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -5,7 +5,7 @@ Win32Proj librdkafkacpp librdkafkacpp - 8.1 + 10.0 DynamicLibrary diff -Nru librdkafka-1.9.2/win32/librdkafka.vcxproj librdkafka-2.0.2/win32/librdkafka.vcxproj --- librdkafka-1.9.2/win32/librdkafka.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/librdkafka.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -4,7 +4,7 @@ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54} Win32Proj librdkafka - 8.1 + 10.0 DynamicLibrary @@ -229,6 +229,7 @@ + diff -Nru librdkafka-1.9.2/win32/msbuild.ps1 librdkafka-2.0.2/win32/msbuild.ps1 --- librdkafka-1.9.2/win32/msbuild.ps1 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/win32/msbuild.ps1 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,15 @@ +param( + [string]$config='Release', + [string]$platform='x64', + [string]$toolset='v142' +) + +$msbuild = (& "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -prerelease -products * -requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe) + +echo "Using msbuild $msbuild" + +echo "Cleaning $config $platform $toolset" +& $msbuild win32\librdkafka.sln /p:Configuration=$config /p:Platform=$platform /p:PlatformToolset=$toolset /target:Clean + +echo "Building $config $platform $toolset" +& $msbuild win32\librdkafka.sln /p:Configuration=$config /p:Platform=$platform /p:PlatformToolset=$toolset diff -Nru librdkafka-1.9.2/win32/openssl_engine_example/openssl_engine_example.vcxproj librdkafka-2.0.2/win32/openssl_engine_example/openssl_engine_example.vcxproj --- librdkafka-1.9.2/win32/openssl_engine_example/openssl_engine_example.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/openssl_engine_example/openssl_engine_example.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -9,7 +9,7 @@ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7} Win32Proj openssl_engine_example - 8.1 + 10.0 Application diff -Nru librdkafka-1.9.2/win32/package-nuget.ps1 librdkafka-2.0.2/win32/package-nuget.ps1 --- librdkafka-1.9.2/win32/package-nuget.ps1 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/package-nuget.ps1 2023-01-20 09:14:36.000000000 +0000 @@ -16,6 +16,15 @@ Requires CoApp #> +param( + [string]$version='0.0.0', + [string]$destdir='.\artifacts' +) +$autopkgFile = "win32/librdkafka.autopkg" +cat ($autopkgFile + ".template") | % { $_ -replace "@version", $version } > $autopkgFile + +Write-NuGetPackage $autopkgFile + +Move-Item -Path .\*.nupkg -Destination $destdir -Write-NuGetPackage librdkafka.autopkg diff -Nru librdkafka-1.9.2/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj librdkafka-2.0.2/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj --- librdkafka-1.9.2/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -4,7 +4,7 @@ {88B682AB-5082-49D5-A672-9904C5F43ABB} Win32Proj rdkafka_complex_consumer_example_cpp - 8.1 + 10.0 @@ -64,4 +64,4 @@ - \ No newline at end of file + diff -Nru librdkafka-1.9.2/win32/rdkafka_example/rdkafka_example.vcxproj librdkafka-2.0.2/win32/rdkafka_example/rdkafka_example.vcxproj --- 
librdkafka-1.9.2/win32/rdkafka_example/rdkafka_example.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/rdkafka_example/rdkafka_example.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -4,7 +4,7 @@ {84585784-5BDC-43BE-B714-23EA2E7AEA5B} Win32Proj rdkafka_example - 8.1 + 10.0 @@ -94,4 +94,4 @@ - \ No newline at end of file + diff -Nru librdkafka-1.9.2/win32/rdkafka_performance/rdkafka_performance.vcxproj librdkafka-2.0.2/win32/rdkafka_performance/rdkafka_performance.vcxproj --- librdkafka-1.9.2/win32/rdkafka_performance/rdkafka_performance.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/rdkafka_performance/rdkafka_performance.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -4,7 +4,7 @@ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC} Win32Proj rdkafka_performance - 8.1 + 10.0 @@ -94,4 +94,4 @@ - \ No newline at end of file + diff -Nru librdkafka-1.9.2/win32/setup-msys2.ps1 librdkafka-2.0.2/win32/setup-msys2.ps1 --- librdkafka-1.9.2/win32/setup-msys2.ps1 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/win32/setup-msys2.ps1 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,31 @@ +# Install (if necessary) and set up msys2. + + +$url="https://github.com/msys2/msys2-installer/releases/download/2022-10-28/msys2-base-x86_64-20221028.sfx.exe" +$sha256="e365b79b4b30b6f4baf34bd93f3d2a41c0a92801c7a96d79cddbfca1090a0554" + + +if (!(Test-Path -Path "c:\msys64\usr\bin\bash.exe")) { + echo "Downloading and installing msys2 to c:\msys64" + + (New-Object System.Net.WebClient).DownloadFile($url, './msys2-installer.exe') + + # Verify checksum + (Get-FileHash -Algorithm "SHA256" .\msys2-installer.exe).hash -eq $sha256 + + # Install msys2 + .\msys2-installer.exe -y -oc:\ + + Remove-Item msys2-installer.exe + + # Set up msys2 the first time + echo "Setting up msys" + c:\msys64\usr\bin\bash -lc ' ' + +} else { + echo "Using previously installed msys2" +} + +# Update packages +echo "Updating msys2 packages" +c:\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu --overwrite '*'" diff -Nru librdkafka-1.9.2/win32/setup-vcpkg.ps1 librdkafka-2.0.2/win32/setup-vcpkg.ps1 --- librdkafka-1.9.2/win32/setup-vcpkg.ps1 1970-01-01 00:00:00.000000000 +0000 +++ librdkafka-2.0.2/win32/setup-vcpkg.ps1 2023-01-20 09:14:36.000000000 +0000 @@ -0,0 +1,13 @@ +# Set up vcpkg and install required packages. + +if (!(Test-Path -Path vcpkg/.git)) { + git clone https://github.com/Microsoft/vcpkg.git +} else { + echo "Updating vcpkg git repo" + cd vcpkg + git pull + cd .. +} + +.\vcpkg\bootstrap-vcpkg.bat + diff -Nru librdkafka-1.9.2/win32/tests/tests.vcxproj librdkafka-2.0.2/win32/tests/tests.vcxproj --- librdkafka-1.9.2/win32/tests/tests.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/tests/tests.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -4,7 +4,7 @@ {BE4E1264-5D13-423D-8191-71F7041459E7} Win32Proj tests - 8.1 + 10.0 @@ -213,6 +213,12 @@ + + + + + + diff -Nru librdkafka-1.9.2/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj librdkafka-2.0.2/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj --- librdkafka-1.9.2/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj 2022-08-01 17:09:58.000000000 +0000 +++ librdkafka-2.0.2/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj 2023-01-20 09:14:36.000000000 +0000 @@ -9,7 +9,7 @@ {1A64A271-4840-4686-9F6F-F5AF0F7C385A} Win32Proj win_ssl_cert_store - 8.1 + 10.0 Application @@ -129,4 +129,4 @@ - \ No newline at end of file +
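
The test.c hunks above wire the new Admin API result events (RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT and RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) into test_wait_admin_result(), polling a temporary queue and walking the per-group errors. As a rough sketch of the same event-queue pattern outside the test harness — assuming the v2.0.x entry point rd_kafka_ListConsumerGroups(rk, options, queue) and the listing accessors rd_kafka_ListConsumerGroups_result_valid() / rd_kafka_ConsumerGroupListing_group_id(), none of which appear in the hunks above — an application might handle the result like this:

    /* Sketch only: list consumer groups via the Admin API introduced in
     * v2.0.0.  rd_kafka_ListConsumerGroups(), ..._result_valid() and
     * rd_kafka_ConsumerGroupListing_group_id() are assumed from the 2.0.x
     * headers and are not shown in this diff; error handling is abbreviated. */
    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void list_groups(rd_kafka_t *rk) {
            rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
            rd_kafka_event_t *rkev;
            const rd_kafka_ListConsumerGroups_result_t *res;
            const rd_kafka_error_t **errors;
            const rd_kafka_ConsumerGroupListing_t **groups;
            size_t error_cnt, group_cnt, i;

            /* Request the listing; the result event is delivered to q. */
            rd_kafka_ListConsumerGroups(rk, NULL /* default options */, q);

            /* Wait up to 10s for the result event. */
            rkev = rd_kafka_queue_poll(q, 10 * 1000);
            if (!rkev || rd_kafka_event_type(rkev) !=
                             RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) {
                    fprintf(stderr, "No ListConsumerGroups result\n");
                    if (rkev)
                            rd_kafka_event_destroy(rkev);
                    rd_kafka_queue_destroy(q);
                    return;
            }

            res = rd_kafka_event_ListConsumerGroups_result(rkev);

            /* Per-group errors, checked the same way as in
             * test_wait_admin_result() above. */
            errors = rd_kafka_ListConsumerGroups_result_errors(res, &error_cnt);
            for (i = 0; i < error_cnt; i++)
                    fprintf(stderr, "Error: %s\n",
                            rd_kafka_error_string(errors[i]));

            /* Groups that were listed successfully. */
            groups = rd_kafka_ListConsumerGroups_result_valid(res, &group_cnt);
            for (i = 0; i < group_cnt; i++)
                    printf("Group: %s\n",
                           rd_kafka_ConsumerGroupListing_group_id(groups[i]));

            rd_kafka_event_destroy(rkev);
            rd_kafka_queue_destroy(q);
    }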