diff --git a/.github/actions/clean-disk/action.yml b/.github/actions/clean-disk/action.yml index 8bcc5f1396802..d74c3f25fc64c 100644 --- a/.github/actions/clean-disk/action.yml +++ b/.github/actions/clean-disk/action.yml @@ -31,7 +31,7 @@ runs: directories=(/usr/local/lib/android /opt/ghc) if [[ "${{ inputs.mode }}" == "full" ]]; then # remove these directories only when mode is 'full' - directories+=(/usr/share/dotnet) + directories+=(/usr/share/dotnet /opt/hostedtoolcache/CodeQL) fi emptydir=/tmp/empty$$/ mkdir $emptydir diff --git a/.github/workflows/pulsar-ci.yaml b/.github/workflows/pulsar-ci.yaml index 64b85cb14c580..32e5ef7874d35 100644 --- a/.github/workflows/pulsar-ci.yaml +++ b/.github/workflows/pulsar-ci.yaml @@ -22,6 +22,7 @@ on: pull_request: branches: - master + - branch-3.1 schedule: - cron: '0 12 * * *' workflow_dispatch: @@ -189,6 +190,8 @@ jobs: group: BROKER_GROUP_2 - name: Brokers - Broker Group 3 group: BROKER_GROUP_3 + - name: Brokers - Broker Group 4 + group: BROKER_GROUP_4 - name: Brokers - Client Api group: BROKER_CLIENT_API - name: Brokers - Client Impl @@ -746,6 +749,8 @@ jobs: - name: Clean Disk uses: ./.github/actions/clean-disk + with: + mode: full - name: Cache local Maven repository uses: actions/cache@v3 @@ -861,6 +866,7 @@ jobs: - name: Pulsar IO group: PULSAR_IO + clean_disk: true - name: Sql group: SQL @@ -872,6 +878,10 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm + - name: Clean Disk when needed + if: ${{ matrix.clean_disk }} + uses: ./.github/actions/clean-disk + - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} @@ -1072,6 +1082,7 @@ jobs: - name: Pulsar IO - Oracle group: PULSAR_IO_ORA + clean_disk: true steps: - name: checkout @@ -1080,6 +1091,10 @@ jobs: - name: Tune Runner VM uses: ./.github/actions/tune-runner-vm + - name: Clean Disk when needed + if: ${{ 
matrix.clean_disk }} + uses: ./.github/actions/clean-disk + - name: Setup ssh access to build runner VM # ssh access is enabled for builds in own forks if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }} diff --git a/.idea/vcs.xml b/.idea/vcs.xml index 99fce8b2b9812..951f00c3cc3b8 100644 --- a/.idea/vcs.xml +++ b/.idea/vcs.xml @@ -1,4 +1,4 @@ - + - - - - - - + + + + + + diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 872764f899827..ca4639f8053e0 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -1,4 +1,4 @@ - + - + com.gradle gradle-enterprise-maven-extension diff --git a/.mvn/gradle-enterprise.xml b/.mvn/gradle-enterprise.xml index 2667402c23cdb..c8626080d0866 100644 --- a/.mvn/gradle-enterprise.xml +++ b/.mvn/gradle-enterprise.xml @@ -1,4 +1,4 @@ - + - + https://ge.apache.org false diff --git a/bouncy-castle/bc/pom.xml b/bouncy-castle/bc/pom.xml index 7ea333a4711a7..632a83b9fe731 100644 --- a/bouncy-castle/bc/pom.xml +++ b/bouncy-castle/bc/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative bouncy-castle-parent 3.1.0-SNAPSHOT .. - bouncy-castle-bc Apache Pulsar :: Bouncy Castle :: BC - ${project.groupId} @@ -39,23 +36,19 @@ ${project.version} provided - org.bouncycastle bcpkix-jdk18on ${bouncycastle.version} - org.bouncycastle bcprov-ext-jdk18on ${bouncycastle.version} - - de.ntcomputer @@ -73,7 +66,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/bouncy-castle/bcfips-include-test/pom.xml b/bouncy-castle/bcfips-include-test/pom.xml index e8348be9292cd..7c102ee9e23e3 100644 --- a/bouncy-castle/bcfips-include-test/pom.xml +++ b/bouncy-castle/bcfips-include-test/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative bouncy-castle-parent 3.1.0-SNAPSHOT .. 
- bcfips-include-test Pulsar Bouncy Castle FIPS Test Broker and client runs auth include BC FIPS verison - ${project.groupId} @@ -39,7 +37,6 @@ ${project.version} test - ${project.groupId} pulsar-broker @@ -53,7 +50,6 @@ test-jar test - ${project.groupId} pulsar-broker @@ -66,7 +62,6 @@ test - ${project.groupId} @@ -74,7 +69,6 @@ ${project.version} pkg - diff --git a/bouncy-castle/bcfips/pom.xml b/bouncy-castle/bcfips/pom.xml index a07e5e19907f2..645074feab382 100644 --- a/bouncy-castle/bcfips/pom.xml +++ b/bouncy-castle/bcfips/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative bouncy-castle-parent 3.1.0-SNAPSHOT .. - bouncy-castle-bcfips Apache Pulsar :: Bouncy Castle :: BC-FIPS - ${project.groupId} @@ -39,20 +36,17 @@ ${project.version} provided - org.bouncycastle bc-fips ${bouncycastle.bc-fips.version} - org.bouncycastle bcpkix-fips ${bouncycastle.bcpkix-fips.version} - diff --git a/bouncy-castle/pom.xml b/bouncy-castle/pom.xml index daefeb83b5371..f69048ede3265 100644 --- a/bouncy-castle/pom.xml +++ b/bouncy-castle/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- @@ -42,16 +41,13 @@ - bouncy-castle-parent Apache Pulsar :: Bouncy Castle :: Parent - bc bcfips - bcfips-include-test diff --git a/build/run_unit_group.sh b/build/run_unit_group.sh index 69434b011b37e..17d0efeed9937 100755 --- a/build/run_unit_group.sh +++ b/build/run_unit_group.sh @@ -87,6 +87,10 @@ function test_group_broker_group_3() { mvn_test -pl pulsar-broker -Dgroups='broker-admin' } +function test_group_broker_group_4() { + mvn_test -pl pulsar-broker -Dgroups='cluster-migration' +} + function test_group_broker_client_api() { mvn_test -pl pulsar-broker -Dgroups='broker-api' } diff --git a/buildtools/pom.xml b/buildtools/pom.xml index 836e1c5cb5f0e..6a717e788fe7b 100644 --- a/buildtools/pom.xml +++ b/buildtools/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache apache 29 - + - - org.apache.pulsar + io.streamnative buildtools 3.1.0-SNAPSHOT jar Pulsar Build Tools - - 2023-05-03T02:53:27Z + 2024-03-04T21:07:36Z 1.8 1.8 3.1.0 @@ -47,7 +44,7 @@ 4.1 8.37 3.1.2 - 4.1.94.Final + 4.1.100.Final 4.2.3 32.1.1-jre 1.10.12 @@ -60,7 +57,6 @@ --add-opens java.base/jdk.internal.platform=ALL-UNNAMED - @@ -72,9 +68,7 @@ - - org.yaml snakeyaml @@ -95,7 +89,6 @@ guice ${guice.version} - org.testng testng @@ -147,7 +140,6 @@ ${mockito.version} - @@ -256,6 +248,55 @@ **/proto/* + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + + + org.apache.maven.plugins + maven-source-plugin + 3.2.0 + + + attach-sources + + jar-no-fork + + + + + + maven-gpg-plugin + + + sign-artifacts + verify + + sign + + + + @@ -265,4 +306,14 @@ + + + ossrh + https://s01.oss.sonatype.org/content/repositories/snapshots + + + ossrh + https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ + + diff --git a/buildtools/src/main/resources/log4j2.xml b/buildtools/src/main/resources/log4j2.xml index 184f58487eaf0..9e12d754fe7b5 100644 --- a/buildtools/src/main/resources/log4j2.xml +++ 
b/buildtools/src/main/resources/log4j2.xml @@ -1,4 +1,4 @@ - + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + diff --git a/buildtools/src/main/resources/pulsar/checkstyle.xml b/buildtools/src/main/resources/pulsar/checkstyle.xml index c63c8993408de..7a32584cc4dd6 100644 --- a/buildtools/src/main/resources/pulsar/checkstyle.xml +++ b/buildtools/src/main/resources/pulsar/checkstyle.xml @@ -1,4 +1,4 @@ - + - - + - - - - - - - - - - - - + + + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - + + + + + - - - - + IMPORT CHECKS - - - - + --> + + + + - - - - - + + + + + + + - - - - + + - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + - - - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + } else --> - else --> - - - - - - - - - - - - - - - - - + + + + + + + + + + - - - - - - - - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + - - - - - + + + diff --git a/buildtools/src/main/resources/pulsar/suppressions.xml b/buildtools/src/main/resources/pulsar/suppressions.xml index 57a01c60f6a27..e558a8a0618fd 100644 --- a/buildtools/src/main/resources/pulsar/suppressions.xml +++ b/buildtools/src/main/resources/pulsar/suppressions.xml @@ -1,4 +1,4 @@ - + - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/conf/broker.conf b/conf/broker.conf index 40eb1bcc32485..2dbcff12b1610 100644 --- a/conf/broker.conf +++ 
b/conf/broker.conf @@ -538,6 +538,9 @@ brokerServiceCompactionThresholdInBytes=0 # If the execution time of the compaction phase one loop exceeds this time, the compaction will not proceed. brokerServiceCompactionPhaseOneLoopTimeInSeconds=30 +# Whether retain null-key message during topic compaction +topicCompactionRetainNullKey=true + # Whether to enable the delayed delivery for messages. # If disabled, messages will be immediately delivered and there will # be no tracking overhead. @@ -1372,7 +1375,9 @@ loadBalancerCPUResourceWeight=1.0 # The direct memory usage weight when calculating new resource usage. # It only takes effect in the ThresholdShedder strategy. -loadBalancerDirectMemoryResourceWeight=1.0 +# Direct memory usage cannot accurately reflect the machine's load, +# and it is not recommended to use it to score the machine's load. +loadBalancerDirectMemoryResourceWeight=0 # Bundle unload minimum throughput threshold (MB), avoiding bundle unload frequently. # It only takes effect in the ThresholdShedder strategy. 
diff --git a/conf/entry_location_rocksdb.conf b/conf/entry_location_rocksdb.conf index 31bd58506ef75..42d916ded378f 100644 --- a/conf/entry_location_rocksdb.conf +++ b/conf/entry_location_rocksdb.conf @@ -52,6 +52,8 @@ max_bytes_for_level_base=268435456 # set by jni: options.setTargetFileSizeBase target_file_size_base=67108864 + # set by jni: options.setLevelCompactionDynamicLevelBytes + level_compaction_dynamic_level_bytes=true [TableOptions/BlockBasedTable "default"] # set by jni: tableOptions.setBlockSize @@ -66,5 +68,3 @@ filter_policy=rocksdb.BloomFilter:10:false # set by jni: tableOptions.setCacheIndexAndFilterBlocks cache_index_and_filter_blocks=true - # set by jni: options.setLevelCompactionDynamicLevelBytes - level_compaction_dynamic_level_bytes=true \ No newline at end of file diff --git a/conf/filesystem_offload_core_site.xml b/conf/filesystem_offload_core_site.xml index d26cec2cc60f0..3c7e6d94bbe50 100644 --- a/conf/filesystem_offload_core_site.xml +++ b/conf/filesystem_offload_core_site.xml @@ -1,3 +1,4 @@ + - - - fs.defaultFS - - - - hadoop.tmp.dir - pulsar - - - io.file.buffer.size - 4096 - - - io.seqfile.compress.blocksize - 1000000 - - - io.seqfile.compression.type - BLOCK - - - io.map.index.interval - 128 - - + + + fs.defaultFS + + + + hadoop.tmp.dir + pulsar + + + io.file.buffer.size + 4096 + + + io.seqfile.compress.blocksize + 1000000 + + + io.seqfile.compression.type + BLOCK + + + io.map.index.interval + 128 + diff --git a/conf/functions_log4j2.xml b/conf/functions_log4j2.xml index 6902a3acd8736..a1dcc6a1f654a 100644 --- a/conf/functions_log4j2.xml +++ b/conf/functions_log4j2.xml @@ -1,3 +1,4 @@ + - pulsar-functions-instance - 30 - - - pulsar.log.appender - RollingFile - - - pulsar.log.level - info - - - bk.log.level - info - - - - - Console - SYSTEM_OUT - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - RollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log - 
${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz - true - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? - - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}*log.gz - - - 30d - - - - - - BkRollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz - true - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? - - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}.bk*log.gz - - - 30d - - - - - - - - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - ${sys:bk.log.level} - false - - BkRollingFile - - - - ${sys:pulsar.log.level} - - ${sys:pulsar.log.appender} - ${sys:pulsar.log.level} - - - + pulsar-functions-instance + 30 + + + pulsar.log.appender + RollingFile + + + pulsar.log.level + info + + + bk.log.level + info + + + + + Console + SYSTEM_OUT + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + RollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? + + + + + ${sys:pulsar.function.log.dir} + 2 + + ${sys:pulsar.function.log.file}*log.gz + + + 30d + + + + + + BkRollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? 
+ + + + + ${sys:pulsar.function.log.dir} + 2 + + ${sys:pulsar.function.log.file}.bk*log.gz + + + 30d + + + + + + + + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + ${sys:bk.log.level} + false + + BkRollingFile + + + + ${sys:pulsar.log.level} + + ${sys:pulsar.log.appender} + ${sys:pulsar.log.level} + + + diff --git a/conf/functions_worker.yml b/conf/functions_worker.yml index 4c5b6aab1b7f4..3871c74a88778 100644 --- a/conf/functions_worker.yml +++ b/conf/functions_worker.yml @@ -43,6 +43,16 @@ metadataStoreOperationTimeoutSeconds: 30 # Metadata store cache expiry time in seconds metadataStoreCacheExpirySeconds: 300 +# Specifies if the function worker should use classloading for validating submissions for built-in +# connectors and functions. This is required for validateConnectorConfig to take effect. +# Default is false. +enableClassloadingOfBuiltinFiles: false + +# Specifies if the function worker should use classloading for validating submissions for external +# connectors and functions. This is required for validateConnectorConfig to take effect. +# Default is false. 
+enableClassloadingOfExternalFiles: false + ################################ # Function package management ################################ @@ -398,9 +408,20 @@ saslJaasServerRoleTokenSignerSecretPath: ######################## connectorsDirectory: ./connectors +# Whether to enable referencing connectors directory files by file url in connector (sink/source) creation +enableReferencingConnectorDirectoryFiles: true +# Regex patterns for enabling creation of connectors by referencing packages in matching http/https urls +additionalEnabledConnectorUrlPatterns: [] functionsDirectory: ./functions - -# Should connector config be validated during submission +# Whether to enable referencing functions directory files by file url in functions creation +enableReferencingFunctionsDirectoryFiles: true +# Regex patterns for enabling creation of functions by referencing packages in matching http/https urls +additionalEnabledFunctionsUrlPatterns: [] + +# Enables extended validation for connector config with fine-grain annotation based validation +# during submission. Classloading with either enableClassloadingOfExternalFiles or +# enableClassloadingOfBuiltinFiles must be enabled on the worker for this to take effect. +# Default is false. validateConnectorConfig: false # Whether to initialize distributed log metadata by runtime. diff --git a/conf/proxy.conf b/conf/proxy.conf index 2fda32abc1ba6..8285e1cb75320 100644 --- a/conf/proxy.conf +++ b/conf/proxy.conf @@ -28,17 +28,19 @@ metadataStoreUrl= # The metadata store URL for the configuration data. If empty, we fall back to use metadataStoreUrl configurationMetadataStoreUrl= -# If Service Discovery is Disabled this url should point to the discovery service provider. +# If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the discovery service +# provider, and does not support multi urls yet. # The URL must begin with pulsar:// for plaintext or with pulsar+ssl:// for TLS. 
brokerServiceURL= brokerServiceURLTLS= -# These settings are unnecessary if `zookeeperServers` is specified +# If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the discovery service +# provider, and does not support multi urls yet. brokerWebServiceURL= brokerWebServiceURLTLS= -# If function workers are setup in a separate cluster, configure the following 2 settings -# to point to the function workers cluster +# If function workers are setup in a separate cluster, configure the following 2 settings. This url should point to +# the discovery service provider of the function workers cluster, and does not support multi urls yet. functionWorkerWebServiceURL= functionWorkerWebServiceURLTLS= @@ -370,6 +372,8 @@ zooKeeperCacheExpirySeconds=-1 ### --- Metrics --- ### +# Whether to enable the proxy's /metrics and /proxy-stats http endpoints +enableProxyStatsEndpoints=true # Whether the '/metrics' endpoint requires authentication. Defaults to true authenticateMetricsEndpoint=true # Enable cache metrics data, default value is false diff --git a/conf/schema_example.conf b/conf/schema_example.json similarity index 100% rename from conf/schema_example.conf rename to conf/schema_example.json diff --git a/conf/standalone.conf b/conf/standalone.conf index 76223c5933e45..0b486bdaf0481 100644 --- a/conf/standalone.conf +++ b/conf/standalone.conf @@ -1277,4 +1277,7 @@ brokerInterceptorsDirectory=./interceptors brokerInterceptors= # Enable or disable the broker interceptor, which is only used for testing for now -disableBrokerInterceptors=true \ No newline at end of file +disableBrokerInterceptors=true + +# Whether retain null-key message during topic compaction +topicCompactionRetainNullKey=true diff --git a/distribution/io/pom.xml b/distribution/io/pom.xml index 568d76922bf4e..90a59b33f815a 100644 --- a/distribution/io/pom.xml +++ b/distribution/io/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative distribution 
3.1.0-SNAPSHOT .. - pulsar-io-distribution pom Pulsar :: Distribution :: IO - ${project.groupId} @@ -46,7 +43,6 @@ provided - @@ -69,9 +65,32 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + - generator-connector-config @@ -151,5 +170,4 @@ - diff --git a/distribution/io/src/assemble/io.xml b/distribution/io/src/assemble/io.xml index 5b652170fdbb5..71d5aaf69f76a 100644 --- a/distribution/io/src/assemble/io.xml +++ b/distribution/io/src/assemble/io.xml @@ -1,3 +1,4 @@ + - + bin dir @@ -38,48 +37,120 @@ . 644 - - - ${basedir}/../../pulsar-io/cassandra/target/pulsar-io-cassandra-${project.version}.nar - ${basedir}/../../pulsar-io/twitter/target/pulsar-io-twitter-${project.version}.nar - ${basedir}/../../pulsar-io/kafka/target/pulsar-io-kafka-${project.version}.nar - ${basedir}/../../pulsar-io/http/target/pulsar-io-http-${project.version}.nar - ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar - ${basedir}/../../pulsar-io/rabbitmq/target/pulsar-io-rabbitmq-${project.version}.nar - ${basedir}/../../pulsar-io/nsq/target/pulsar-io-nsq-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/sqlite/target/pulsar-io-jdbc-sqlite-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/mariadb/target/pulsar-io-jdbc-mariadb-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/clickhouse/target/pulsar-io-jdbc-clickhouse-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/postgres/target/pulsar-io-jdbc-postgres-${project.version}.nar - ${basedir}/../../pulsar-io/jdbc/openmldb/target/pulsar-io-jdbc-openmldb-${project.version}.nar - ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar - ${basedir}/../../pulsar-io/batch-data-generator/target/pulsar-io-batch-data-generator-${project.version}.nar - ${basedir}/../../pulsar-io/aerospike/target/pulsar-io-aerospike-${project.version}.nar - 
${basedir}/../../pulsar-io/elastic-search/target/pulsar-io-elastic-search-${project.version}.nar - ${basedir}/../../pulsar-io/kafka-connect-adaptor-nar/target/pulsar-io-kafka-connect-adaptor-${project.version}.nar - ${basedir}/../../pulsar-io/hbase/target/pulsar-io-hbase-${project.version}.nar - ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar - ${basedir}/../../pulsar-io/hdfs2/target/pulsar-io-hdfs2-${project.version}.nar - ${basedir}/../../pulsar-io/hdfs3/target/pulsar-io-hdfs3-${project.version}.nar - ${basedir}/../../pulsar-io/file/target/pulsar-io-file-${project.version}.nar - ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar - ${basedir}/../../pulsar-io/canal/target/pulsar-io-canal-${project.version}.nar - ${basedir}/../../pulsar-io/netty/target/pulsar-io-netty-${project.version}.nar - ${basedir}/../../pulsar-io/mongo/target/pulsar-io-mongo-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/mysql/target/pulsar-io-debezium-mysql-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/postgres/target/pulsar-io-debezium-postgres-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/oracle/target/pulsar-io-debezium-oracle-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/mssql/target/pulsar-io-debezium-mssql-${project.version}.nar - ${basedir}/../../pulsar-io/debezium/mongodb/target/pulsar-io-debezium-mongodb-${project.version}.nar - ${basedir}/../../pulsar-io/influxdb/target/pulsar-io-influxdb-${project.version}.nar - ${basedir}/../../pulsar-io/redis/target/pulsar-io-redis-${project.version}.nar - ${basedir}/../../pulsar-io/flume/target/pulsar-io-flume-${project.version}.nar - ${basedir}/../../pulsar-io/solr/target/pulsar-io-solr-${project.version}.nar - ${basedir}/../../pulsar-io/dynamodb/target/pulsar-io-dynamodb-${project.version}.nar - ${basedir}/../../pulsar-io/alluxio/target/pulsar-io-alluxio-${project.version}.nar + + 
${basedir}/../../pulsar-io/cassandra/target/pulsar-io-cassandra-${project.version}.nar + + + ${basedir}/../../pulsar-io/twitter/target/pulsar-io-twitter-${project.version}.nar + + + ${basedir}/../../pulsar-io/kafka/target/pulsar-io-kafka-${project.version}.nar + + + ${basedir}/../../pulsar-io/http/target/pulsar-io-http-${project.version}.nar + + + ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar + + + ${basedir}/../../pulsar-io/rabbitmq/target/pulsar-io-rabbitmq-${project.version}.nar + + + ${basedir}/../../pulsar-io/nsq/target/pulsar-io-nsq-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/sqlite/target/pulsar-io-jdbc-sqlite-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/mariadb/target/pulsar-io-jdbc-mariadb-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/clickhouse/target/pulsar-io-jdbc-clickhouse-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/postgres/target/pulsar-io-jdbc-postgres-${project.version}.nar + + + ${basedir}/../../pulsar-io/jdbc/openmldb/target/pulsar-io-jdbc-openmldb-${project.version}.nar + + + ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar + + + ${basedir}/../../pulsar-io/batch-data-generator/target/pulsar-io-batch-data-generator-${project.version}.nar + + + ${basedir}/../../pulsar-io/aerospike/target/pulsar-io-aerospike-${project.version}.nar + + + ${basedir}/../../pulsar-io/elastic-search/target/pulsar-io-elastic-search-${project.version}.nar + + + ${basedir}/../../pulsar-io/kafka-connect-adaptor-nar/target/pulsar-io-kafka-connect-adaptor-${project.version}.nar + + + ${basedir}/../../pulsar-io/hbase/target/pulsar-io-hbase-${project.version}.nar + + + ${basedir}/../../pulsar-io/kinesis/target/pulsar-io-kinesis-${project.version}.nar + + + ${basedir}/../../pulsar-io/hdfs2/target/pulsar-io-hdfs2-${project.version}.nar + + + ${basedir}/../../pulsar-io/hdfs3/target/pulsar-io-hdfs3-${project.version}.nar + + + 
${basedir}/../../pulsar-io/file/target/pulsar-io-file-${project.version}.nar + + + ${basedir}/../../pulsar-io/data-generator/target/pulsar-io-data-generator-${project.version}.nar + + + ${basedir}/../../pulsar-io/canal/target/pulsar-io-canal-${project.version}.nar + + + ${basedir}/../../pulsar-io/netty/target/pulsar-io-netty-${project.version}.nar + + + ${basedir}/../../pulsar-io/mongo/target/pulsar-io-mongo-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/mysql/target/pulsar-io-debezium-mysql-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/postgres/target/pulsar-io-debezium-postgres-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/oracle/target/pulsar-io-debezium-oracle-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/mssql/target/pulsar-io-debezium-mssql-${project.version}.nar + + + ${basedir}/../../pulsar-io/debezium/mongodb/target/pulsar-io-debezium-mongodb-${project.version}.nar + + + ${basedir}/../../pulsar-io/influxdb/target/pulsar-io-influxdb-${project.version}.nar + + + ${basedir}/../../pulsar-io/redis/target/pulsar-io-redis-${project.version}.nar + + + ${basedir}/../../pulsar-io/flume/target/pulsar-io-flume-${project.version}.nar + + + ${basedir}/../../pulsar-io/solr/target/pulsar-io-solr-${project.version}.nar + + + ${basedir}/../../pulsar-io/dynamodb/target/pulsar-io-dynamodb-${project.version}.nar + + + ${basedir}/../../pulsar-io/alluxio/target/pulsar-io-alluxio-${project.version}.nar + diff --git a/distribution/offloaders/pom.xml b/distribution/offloaders/pom.xml index d23ebec2ef26d..992cc41283841 100644 --- a/distribution/offloaders/pom.xml +++ b/distribution/offloaders/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative distribution 3.1.0-SNAPSHOT .. 
- pulsar-offloader-distribution pom Pulsar :: Distribution :: Offloader - ${project.groupId} @@ -66,7 +63,6 @@ - @@ -90,6 +86,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/distribution/offloaders/src/assemble/offloaders.xml b/distribution/offloaders/src/assemble/offloaders.xml index 38f7eee906064..19c60f68c21d4 100644 --- a/distribution/offloaders/src/assemble/offloaders.xml +++ b/distribution/offloaders/src/assemble/offloaders.xml @@ -1,3 +1,4 @@ + - + bin tar.gz @@ -38,13 +37,11 @@ . 644 - ${basedir}/../../tiered-storage/jcloud/target/tiered-storage-jcloud-${project.version}.nar offloaders 644 - ${basedir}/../../tiered-storage/file-system/target/tiered-storage-file-system-${project.version}.nar offloaders diff --git a/distribution/pom.xml b/distribution/pom.xml index 36a3fa1c5835a..7be5264888b20 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - distribution pom Pulsar :: Distribution - - main @@ -47,7 +43,6 @@ shell - core-modules @@ -55,7 +50,6 @@ - @@ -65,6 +59,30 @@ ${skipBuildDistribution} + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/distribution/server/pom.xml b/distribution/server/pom.xml index 2702bace54e6a..1fc9f28ba1025 100644 --- a/distribution/server/pom.xml +++ b/distribution/server/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative distribution 3.1.0-SNAPSHOT .. 
- pulsar-server-distribution pom Pulsar :: Distribution :: Server - ${project.groupId} pulsar-broker ${project.version} - ${project.groupId} pulsar-docs-tools ${project.version} - ${project.groupId} pulsar-proxy ${project.version} - ${project.groupId} pulsar-broker-auth-oidc ${project.version} - ${project.groupId} pulsar-broker-auth-sasl ${project.version} - ${project.groupId} pulsar-client-auth-sasl ${project.version} - jline jline ${jline.version} - org.apache.zookeeper zookeeper-prometheus-metrics @@ -95,7 +85,6 @@ - ${project.groupId} pulsar-package-bookkeeper-storage @@ -107,13 +96,11 @@ - ${project.groupId} pulsar-package-filesystem-storage ${project.version} - ${project.groupId} pulsar-client-tools @@ -125,75 +112,67 @@ - ${project.groupId} pulsar-testclient ${project.version} - org.apache.logging.log4j log4j-api - org.apache.logging.log4j log4j-core - org.apache.logging.log4j log4j-web - io.dropwizard.metrics metrics-core - io.dropwizard.metrics metrics-graphite + + + amqp-client + com.rabbitmq + + - io.dropwizard.metrics metrics-jvm - org.xerial.snappy snappy-java - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - org.apache.logging.log4j log4j-slf4j-impl - org.apache.bookkeeper.stats prometheus-metrics-provider - io.prometheus simpleclient_log4j2 - ${project.groupId} bouncy-castle-bc ${project.version} pkg - ${project.groupId} @@ -208,7 +187,6 @@ - ${project.groupId} pulsar-functions-worker @@ -225,7 +203,6 @@ - ${project.groupId} @@ -240,7 +217,6 @@ - ${project.groupId} @@ -253,7 +229,6 @@ - io.grpc @@ -263,13 +238,11 @@ org.bouncycastle bcpkix-jdk18on - io.perfmark perfmark-api compile - org.apache.bookkeeper.http @@ -291,7 +264,6 @@ vertx-web - @@ -332,6 +304,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/distribution/server/src/assemble/LICENSE.bin.txt b/distribution/server/src/assemble/LICENSE.bin.txt index 
3bbb5183b0f7f..7eb652e2f60ab 100644 --- a/distribution/server/src/assemble/LICENSE.bin.txt +++ b/distribution/server/src/assemble/LICENSE.bin.txt @@ -259,7 +259,7 @@ The Apache Software License, Version 2.0 - com.fasterxml.jackson.module-jackson-module-parameter-names-2.14.2.jar * Caffeine -- com.github.ben-manes.caffeine-caffeine-2.9.1.jar * Conscrypt -- org.conscrypt-conscrypt-openjdk-uber-2.5.2.jar - * Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-2.0.1.jar + * Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-2.9.0.jar * Bitbucket -- org.bitbucket.b_c-jose4j-0.9.3.jar * Gson - com.google.code.gson-gson-2.8.9.jar @@ -285,31 +285,30 @@ The Apache Software License, Version 2.0 - commons-lang-commons-lang-2.6.jar - commons-logging-commons-logging-1.1.1.jar - org.apache.commons-commons-collections4-4.4.jar - - org.apache.commons-commons-compress-1.21.jar + - org.apache.commons-commons-compress-1.26.0.jar - org.apache.commons-commons-lang3-3.11.jar - org.apache.commons-commons-text-1.10.0.jar * Netty - - io.netty-netty-buffer-4.1.94.Final.jar - - io.netty-netty-codec-4.1.94.Final.jar - - io.netty-netty-codec-dns-4.1.94.Final.jar - - io.netty-netty-codec-http-4.1.94.Final.jar - - io.netty-netty-codec-http2-4.1.94.Final.jar - - io.netty-netty-codec-socks-4.1.94.Final.jar - - io.netty-netty-codec-haproxy-4.1.94.Final.jar - - io.netty-netty-common-4.1.94.Final.jar - - io.netty-netty-handler-4.1.94.Final.jar - - io.netty-netty-handler-proxy-4.1.94.Final.jar - - io.netty-netty-resolver-4.1.94.Final.jar - - io.netty-netty-resolver-dns-4.1.94.Final.jar - - io.netty-netty-resolver-dns-classes-macos-4.1.94.Final.jar - - io.netty-netty-resolver-dns-native-macos-4.1.94.Final-osx-aarch_64.jar - - io.netty-netty-resolver-dns-native-macos-4.1.94.Final-osx-x86_64.jar - - io.netty-netty-transport-4.1.94.Final.jar - - io.netty-netty-transport-classes-epoll-4.1.94.Final.jar - - 
io.netty-netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar - - io.netty-netty-transport-native-epoll-4.1.94.Final.jar - - io.netty-netty-transport-native-unix-common-4.1.94.Final.jar - - io.netty-netty-transport-native-unix-common-4.1.94.Final-linux-x86_64.jar + - io.netty-netty-buffer-4.1.100.Final.jar + - io.netty-netty-codec-4.1.100.Final.jar + - io.netty-netty-codec-dns-4.1.100.Final.jar + - io.netty-netty-codec-http-4.1.100.Final.jar + - io.netty-netty-codec-http2-4.1.100.Final.jar + - io.netty-netty-codec-socks-4.1.100.Final.jar + - io.netty-netty-codec-haproxy-4.1.100.Final.jar + - io.netty-netty-common-4.1.100.Final.jar + - io.netty-netty-handler-4.1.100.Final.jar + - io.netty-netty-handler-proxy-4.1.100.Final.jar + - io.netty-netty-resolver-4.1.100.Final.jar + - io.netty-netty-resolver-dns-4.1.100.Final.jar + - io.netty-netty-resolver-dns-classes-macos-4.1.100.Final.jar + - io.netty-netty-resolver-dns-native-macos-4.1.100.Final-osx-aarch_64.jar + - io.netty-netty-resolver-dns-native-macos-4.1.100.Final-osx-x86_64.jar + - io.netty-netty-transport-4.1.100.Final.jar + - io.netty-netty-transport-classes-epoll-4.1.100.Final.jar + - io.netty-netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar + - io.netty-netty-transport-native-unix-common-4.1.100.Final.jar + - io.netty-netty-transport-native-unix-common-4.1.100.Final-linux-x86_64.jar - io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar @@ -346,34 +345,34 @@ The Apache Software License, Version 2.0 - net.java.dev.jna-jna-jpms-5.12.1.jar - net.java.dev.jna-jna-platform-jpms-5.12.1.jar * BookKeeper - - org.apache.bookkeeper-bookkeeper-common-4.16.2.jar - - org.apache.bookkeeper-bookkeeper-common-allocator-4.16.2.jar - - org.apache.bookkeeper-bookkeeper-proto-4.16.2.jar - - org.apache.bookkeeper-bookkeeper-server-4.16.2.jar - - 
org.apache.bookkeeper-bookkeeper-tools-framework-4.16.2.jar - - org.apache.bookkeeper-circe-checksum-4.16.2.jar - - org.apache.bookkeeper-cpu-affinity-4.16.2.jar - - org.apache.bookkeeper-statelib-4.16.2.jar - - org.apache.bookkeeper-stream-storage-api-4.16.2.jar - - org.apache.bookkeeper-stream-storage-common-4.16.2.jar - - org.apache.bookkeeper-stream-storage-java-client-4.16.2.jar - - org.apache.bookkeeper-stream-storage-java-client-base-4.16.2.jar - - org.apache.bookkeeper-stream-storage-proto-4.16.2.jar - - org.apache.bookkeeper-stream-storage-server-4.16.2.jar - - org.apache.bookkeeper-stream-storage-service-api-4.16.2.jar - - org.apache.bookkeeper-stream-storage-service-impl-4.16.2.jar - - org.apache.bookkeeper.http-http-server-4.16.2.jar - - org.apache.bookkeeper.http-vertx-http-server-4.16.2.jar - - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.16.2.jar - - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.16.2.jar - - org.apache.distributedlog-distributedlog-common-4.16.2.jar - - org.apache.distributedlog-distributedlog-core-4.16.2-tests.jar - - org.apache.distributedlog-distributedlog-core-4.16.2.jar - - org.apache.distributedlog-distributedlog-protocol-4.16.2.jar - - org.apache.bookkeeper.stats-codahale-metrics-provider-4.16.2.jar - - org.apache.bookkeeper-bookkeeper-slogger-api-4.16.2.jar - - org.apache.bookkeeper-bookkeeper-slogger-slf4j-4.16.2.jar - - org.apache.bookkeeper-native-io-4.16.2.jar + - org.apache.bookkeeper-bookkeeper-common-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-common-allocator-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-proto-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-server-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-tools-framework-4.16.4.jar + - org.apache.bookkeeper-circe-checksum-4.16.4.jar + - org.apache.bookkeeper-cpu-affinity-4.16.4.jar + - org.apache.bookkeeper-statelib-4.16.4.jar + - org.apache.bookkeeper-stream-storage-api-4.16.4.jar + - 
org.apache.bookkeeper-stream-storage-common-4.16.4.jar + - org.apache.bookkeeper-stream-storage-java-client-4.16.4.jar + - org.apache.bookkeeper-stream-storage-java-client-base-4.16.4.jar + - org.apache.bookkeeper-stream-storage-proto-4.16.4.jar + - org.apache.bookkeeper-stream-storage-server-4.16.4.jar + - org.apache.bookkeeper-stream-storage-service-api-4.16.4.jar + - org.apache.bookkeeper-stream-storage-service-impl-4.16.4.jar + - org.apache.bookkeeper.http-http-server-4.16.4.jar + - org.apache.bookkeeper.http-vertx-http-server-4.16.4.jar + - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.16.4.jar + - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.16.4.jar + - org.apache.distributedlog-distributedlog-common-4.16.4.jar + - org.apache.distributedlog-distributedlog-core-4.16.4-tests.jar + - org.apache.distributedlog-distributedlog-core-4.16.4.jar + - org.apache.distributedlog-distributedlog-protocol-4.16.4.jar + - org.apache.bookkeeper.stats-codahale-metrics-provider-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-slogger-api-4.16.4.jar + - org.apache.bookkeeper-bookkeeper-slogger-slf4j-4.16.4.jar + - org.apache.bookkeeper-native-io-4.16.4.jar * Apache HTTP Client - org.apache.httpcomponents-httpclient-4.5.13.jar - org.apache.httpcomponents-httpcore-4.4.15.jar @@ -383,25 +382,25 @@ The Apache Software License, Version 2.0 - org.asynchttpclient-async-http-client-2.12.1.jar - org.asynchttpclient-async-http-client-netty-utils-2.12.1.jar * Jetty - - org.eclipse.jetty-jetty-client-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-continuation-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-http-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-io-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-proxy-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-security-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-server-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-servlet-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-servlets-9.4.51.v20230217.jar - - 
org.eclipse.jetty-jetty-util-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-util-ajax-9.4.51.v20230217.jar - - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.51.v20230217.jar - - org.eclipse.jetty.websocket-websocket-api-9.4.51.v20230217.jar - - org.eclipse.jetty.websocket-websocket-client-9.4.51.v20230217.jar - - org.eclipse.jetty.websocket-websocket-common-9.4.51.v20230217.jar - - org.eclipse.jetty.websocket-websocket-server-9.4.51.v20230217.jar - - org.eclipse.jetty.websocket-websocket-servlet-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.51.v20230217.jar - - org.eclipse.jetty-jetty-alpn-server-9.4.51.v20230217.jar + - org.eclipse.jetty-jetty-client-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-continuation-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-http-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-io-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-proxy-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-security-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-server-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-servlet-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-servlets-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-util-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-util-ajax-9.4.54.v20240208.jar + - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.54.v20240208.jar + - org.eclipse.jetty.websocket-websocket-api-9.4.54.v20240208.jar + - org.eclipse.jetty.websocket-websocket-client-9.4.54.v20240208.jar + - org.eclipse.jetty.websocket-websocket-common-9.4.54.v20240208.jar + - org.eclipse.jetty.websocket-websocket-server-9.4.54.v20240208.jar + - org.eclipse.jetty.websocket-websocket-servlet-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.54.v20240208.jar + - org.eclipse.jetty-jetty-alpn-server-9.4.54.v20240208.jar * SnakeYaml -- org.yaml-snakeyaml-2.0.jar * RocksDB - org.rocksdb-rocksdbjni-7.9.2.jar * Google Error Prone Annotations - 
com.google.errorprone-error_prone_annotations-2.5.1.jar @@ -409,7 +408,9 @@ The Apache Software License, Version 2.0 * OkHttp3 - com.squareup.okhttp3-logging-interceptor-4.9.3.jar - com.squareup.okhttp3-okhttp-4.9.3.jar - * Okio - com.squareup.okio-okio-2.8.0.jar + * Okio + - com.squareup.okio-okio-3.4.0.jar + - com.squareup.okio-okio-jvm-3.4.0.jar * Javassist -- org.javassist-javassist-3.25.0-GA.jar * Kotlin Standard Lib - org.jetbrains.kotlin-kotlin-stdlib-1.8.20.jar @@ -418,23 +419,25 @@ The Apache Software License, Version 2.0 - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.8.20.jar - org.jetbrains-annotations-13.0.jar * gRPC - - io.grpc-grpc-all-1.45.1.jar - - io.grpc-grpc-auth-1.45.1.jar - - io.grpc-grpc-context-1.45.1.jar - - io.grpc-grpc-core-1.45.1.jar - - io.grpc-grpc-netty-1.45.1.jar - - io.grpc-grpc-protobuf-1.45.1.jar - - io.grpc-grpc-protobuf-lite-1.45.1.jar - - io.grpc-grpc-stub-1.45.1.jar - - io.grpc-grpc-alts-1.45.1.jar - - io.grpc-grpc-api-1.45.1.jar - - io.grpc-grpc-grpclb-1.45.1.jar - - io.grpc-grpc-netty-shaded-1.45.1.jar - - io.grpc-grpc-services-1.45.1.jar - - io.grpc-grpc-xds-1.45.1.jar - - io.grpc-grpc-rls-1.45.1.jar + - io.grpc-grpc-all-1.55.3.jar + - io.grpc-grpc-auth-1.55.3.jar + - io.grpc-grpc-context-1.55.3.jar + - io.grpc-grpc-core-1.55.3.jar + - io.grpc-grpc-netty-1.55.3.jar + - io.grpc-grpc-protobuf-1.55.3.jar + - io.grpc-grpc-protobuf-lite-1.55.3.jar + - io.grpc-grpc-stub-1.55.3.jar + - io.grpc-grpc-alts-1.55.3.jar + - io.grpc-grpc-api-1.55.3.jar + - io.grpc-grpc-grpclb-1.55.3.jar + - io.grpc-grpc-netty-shaded-1.55.3.jar + - io.grpc-grpc-services-1.55.3.jar + - io.grpc-grpc-xds-1.55.3.jar + - io.grpc-grpc-rls-1.55.3.jar + - io.grpc-grpc-servlet-1.55.3.jar + - io.grpc-grpc-servlet-jakarta-1.55.3.jar * Perfmark - - io.perfmark-perfmark-api-0.19.0.jar + - io.perfmark-perfmark-api-0.26.0.jar * OpenCensus - io.opencensus-opencensus-api-0.28.0.jar - io.opencensus-opencensus-contrib-http-util-0.28.0.jar @@ -442,9 +445,13 @@ The Apache 
Software License, Version 2.0 * Jodah - net.jodah-typetools-0.5.0.jar - net.jodah-failsafe-2.4.4.jar + * Byte Buddy + - net.bytebuddy-byte-buddy-1.14.12.jar + * zt-zip + - org.zeroturnaround-zt-zip-1.17.jar * Apache Avro - - org.apache.avro-avro-1.10.2.jar - - org.apache.avro-avro-protobuf-1.10.2.jar + - org.apache.avro-avro-1.11.3.jar + - org.apache.avro-avro-protobuf-1.11.3.jar * Apache Curator - org.apache.curator-curator-client-5.1.0.jar - org.apache.curator-curator-framework-5.1.0.jar @@ -476,16 +483,16 @@ The Apache Software License, Version 2.0 - io.vertx-vertx-web-common-4.3.8.jar - io.vertx-vertx-grpc-4.3.5.jar * Apache ZooKeeper - - org.apache.zookeeper-zookeeper-3.8.1.jar - - org.apache.zookeeper-zookeeper-jute-3.8.1.jar - - org.apache.zookeeper-zookeeper-prometheus-metrics-3.8.1.jar + - org.apache.zookeeper-zookeeper-3.9.1.jar + - org.apache.zookeeper-zookeeper-jute-3.9.1.jar + - org.apache.zookeeper-zookeeper-prometheus-metrics-3.9.1.jar * Snappy Java - - org.xerial.snappy-snappy-java-1.1.10.1.jar + - org.xerial.snappy-snappy-java-1.1.10.5.jar * Google HTTP Client - com.google.http-client-google-http-client-gson-1.41.0.jar - com.google.http-client-google-http-client-1.41.0.jar - com.google.auto.value-auto-value-annotations-1.9.jar - - com.google.re2j-re2j-1.5.jar + - com.google.re2j-re2j-1.6.jar * Jetcd - io.etcd-jetcd-api-0.7.5.jar - io.etcd-jetcd-common-0.7.5.jar @@ -495,8 +502,6 @@ The Apache Software License, Version 2.0 - com.github.seancfoley-ipaddress-5.3.3.jar * RxJava - io.reactivex.rxjava3-rxjava-3.0.1.jar - * RabbitMQ Java Client - - com.rabbitmq-amqp-client-5.5.3.jar * RoaringBitmap - org.roaringbitmap-RoaringBitmap-0.9.44.jar diff --git a/distribution/server/src/assemble/bin.xml b/distribution/server/src/assemble/bin.xml index 41ac24d0582da..f65a8e2fb7260 100644 --- a/distribution/server/src/assemble/bin.xml +++ b/distribution/server/src/assemble/bin.xml @@ -1,3 +1,4 @@ + - + bin tar.gz @@ -131,14 +130,11 @@ 
${artifact.groupId}-${artifact.artifactId}-${artifact.version}${dashClassifier?}.${artifact.extension} - - org.apache.pulsar:pulsar-functions-runtime-all - + io.streamnative:pulsar-functions-runtime-all org.projectlombok:lombok - - org.apache.pulsar:pulsar-functions-api-examples + io.streamnative:pulsar-functions-api-examples *:tar.gz diff --git a/distribution/shell/pom.xml b/distribution/shell/pom.xml index 9e3134a75e5bf..a119a3d35e655 100644 --- a/distribution/shell/pom.xml +++ b/distribution/shell/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative distribution 3.1.0-SNAPSHOT .. - pulsar-shell-distribution pom Pulsar :: Distribution :: Shell - ${project.groupId} pulsar-client-tools ${project.version} - org.apache.logging.log4j log4j-core - org.apache.logging.log4j log4j-web @@ -53,14 +48,11 @@ org.apache.logging.log4j log4j-slf4j-impl - io.prometheus simpleclient_log4j2 - - @@ -101,6 +93,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/distribution/shell/src/assemble/LICENSE.bin.txt b/distribution/shell/src/assemble/LICENSE.bin.txt index dc8babb32f329..5d880a018c53f 100644 --- a/distribution/shell/src/assemble/LICENSE.bin.txt +++ b/distribution/shell/src/assemble/LICENSE.bin.txt @@ -342,24 +342,24 @@ The Apache Software License, Version 2.0 - commons-logging-1.2.jar - commons-lang3-3.11.jar - commons-text-1.10.0.jar - - commons-compress-1.21.jar + - commons-compress-1.26.0.jar * Netty - - netty-buffer-4.1.94.Final.jar - - netty-codec-4.1.94.Final.jar - - netty-codec-dns-4.1.94.Final.jar - - netty-codec-http-4.1.94.Final.jar - - netty-codec-socks-4.1.94.Final.jar - - netty-codec-haproxy-4.1.94.Final.jar - - netty-common-4.1.94.Final.jar - - netty-handler-4.1.94.Final.jar - - netty-handler-proxy-4.1.94.Final.jar - - netty-resolver-4.1.94.Final.jar - - netty-resolver-dns-4.1.94.Final.jar - - 
netty-transport-4.1.94.Final.jar - - netty-transport-classes-epoll-4.1.94.Final.jar - - netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar - - netty-transport-native-unix-common-4.1.94.Final.jar - - netty-transport-native-unix-common-4.1.94.Final-linux-x86_64.jar + - netty-buffer-4.1.100.Final.jar + - netty-codec-4.1.100.Final.jar + - netty-codec-dns-4.1.100.Final.jar + - netty-codec-http-4.1.100.Final.jar + - netty-codec-socks-4.1.100.Final.jar + - netty-codec-haproxy-4.1.100.Final.jar + - netty-common-4.1.100.Final.jar + - netty-handler-4.1.100.Final.jar + - netty-handler-proxy-4.1.100.Final.jar + - netty-resolver-4.1.100.Final.jar + - netty-resolver-dns-4.1.100.Final.jar + - netty-transport-4.1.100.Final.jar + - netty-transport-classes-epoll-4.1.100.Final.jar + - netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar + - netty-transport-native-unix-common-4.1.100.Final.jar + - netty-transport-native-unix-common-4.1.100.Final-linux-x86_64.jar - netty-tcnative-boringssl-static-2.0.61.Final.jar - netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar @@ -370,9 +370,9 @@ The Apache Software License, Version 2.0 - netty-incubator-transport-classes-io_uring-0.0.21.Final.jar - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar - - netty-resolver-dns-classes-macos-4.1.94.Final.jar - - netty-resolver-dns-native-macos-4.1.94.Final-osx-aarch_64.jar - - netty-resolver-dns-native-macos-4.1.94.Final-osx-x86_64.jar + - netty-resolver-dns-classes-macos-4.1.100.Final.jar + - netty-resolver-dns-native-macos-4.1.100.Final-osx-aarch_64.jar + - netty-resolver-dns-native-macos-4.1.100.Final-osx-x86_64.jar * Prometheus client - simpleclient-0.16.0.jar - simpleclient_log4j2-0.16.0.jar @@ -386,29 +386,29 @@ The Apache Software License, Version 2.0 - log4j-web-2.18.0.jar * BookKeeper - - 
bookkeeper-common-allocator-4.16.2.jar - - cpu-affinity-4.16.2.jar - - circe-checksum-4.16.2.jar + - bookkeeper-common-allocator-4.16.4.jar + - cpu-affinity-4.16.4.jar + - circe-checksum-4.16.4.jar * AirCompressor - aircompressor-0.20.jar * AsyncHttpClient - async-http-client-2.12.1.jar - async-http-client-netty-utils-2.12.1.jar * Jetty - - jetty-client-9.4.51.v20230217.jar - - jetty-http-9.4.51.v20230217.jar - - jetty-io-9.4.51.v20230217.jar - - jetty-util-9.4.51.v20230217.jar - - javax-websocket-client-impl-9.4.51.v20230217.jar - - websocket-api-9.4.51.v20230217.jar - - websocket-client-9.4.51.v20230217.jar - - websocket-common-9.4.51.v20230217.jar + - jetty-client-9.4.54.v20240208.jar + - jetty-http-9.4.54.v20240208.jar + - jetty-io-9.4.54.v20240208.jar + - jetty-util-9.4.54.v20240208.jar + - javax-websocket-client-impl-9.4.54.v20240208.jar + - websocket-api-9.4.54.v20240208.jar + - websocket-client-9.4.54.v20240208.jar + - websocket-common-9.4.54.v20240208.jar * SnakeYaml -- snakeyaml-2.0.jar * Google Error Prone Annotations - error_prone_annotations-2.5.1.jar * Javassist -- javassist-3.25.0-GA.jar * Apache Avro - - avro-1.10.2.jar - - avro-protobuf-1.10.2.jar + - avro-1.11.3.jar + - avro-protobuf-1.11.3.jar BSD 3-clause "New" or "Revised" License * JSR305 -- jsr305-3.0.2.jar -- ../licenses/LICENSE-JSR305.txt diff --git a/distribution/shell/src/assemble/shell.xml b/distribution/shell/src/assemble/shell.xml index f823e0258b231..cd11bd8a1ef57 100644 --- a/distribution/shell/src/assemble/shell.xml +++ b/distribution/shell/src/assemble/shell.xml @@ -1,3 +1,4 @@ + - + bin tar.gz @@ -46,7 +45,6 @@ . 
644 - bin ${basedir}/../../bin/pulsar-admin-common.sh @@ -76,7 +74,6 @@ ${basedir}/../../conf/log4j2.yaml - lib diff --git a/docker/pom.xml b/docker/pom.xml index afe55f0fe57f0..0f1842b1665ec 100644 --- a/docker/pom.xml +++ b/docker/pom.xml @@ -1,4 +1,4 @@ - + - + pom 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT @@ -39,6 +38,30 @@ ${skipBuildDistribution} + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/docker/pulsar-all/pom.xml b/docker/pulsar-all/pom.xml index e616ac132d319..f06ecaaf6c839 100644 --- a/docker/pulsar-all/pom.xml +++ b/docker/pulsar-all/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar + io.streamnative docker-images 3.1.0-SNAPSHOT @@ -29,7 +29,6 @@ pulsar-all-docker-image Apache Pulsar :: Docker Images :: Pulsar Latest Version (Include All Components) pom - ${project.groupId} @@ -64,7 +63,6 @@ - git-commit-id-no-git @@ -177,7 +175,6 @@ - docker-push @@ -200,6 +197,5 @@ - diff --git a/docker/pulsar/Dockerfile b/docker/pulsar/Dockerfile index 593401f57be8c..b465bbf251da8 100644 --- a/docker/pulsar/Dockerfile +++ b/docker/pulsar/Dockerfile @@ -36,10 +36,14 @@ COPY scripts/install-pulsar-client.sh /pulsar/bin # The final image needs to give the root group sufficient permission for Pulsar components # to write to specific directories within /pulsar +# The ownership is changed to uid 10000 to allow using a different root group. This is necessary when running the +# container when gid=0 is prohibited. In that case, the container must be run with uid 10000 with +# any group id != 0 (for example 10001). # The file permissions are preserved when copying files from this builder image to the target image. 
RUN for SUBDIRECTORY in conf data download logs; do \ [ -d /pulsar/$SUBDIRECTORY ] || mkdir /pulsar/$SUBDIRECTORY; \ - chmod -R g+w /pulsar/$SUBDIRECTORY; \ + chmod -R ug+w /pulsar/$SUBDIRECTORY; \ + chown -R 10000:0 /pulsar/$SUBDIRECTORY; \ done # Trino writes logs to this directory (at least during tests), so we need to give the process permission @@ -54,6 +58,7 @@ FROM ubuntu:22.04 ARG DEBIAN_FRONTEND=noninteractive ARG UBUNTU_MIRROR=mirror://mirrors.ubuntu.com/mirrors.txt ARG UBUNTU_SECURITY_MIRROR=http://security.ubuntu.com/ubuntu/ +ARG DEFAULT_USERNAME=pulsar # Install some utilities RUN sed -i -e "s|http://archive\.ubuntu\.com/ubuntu/|${UBUNTU_MIRROR:-mirror://mirrors.ubuntu.com/mirrors.txt}|g" \ @@ -61,9 +66,9 @@ RUN sed -i -e "s|http://archive\.ubuntu\.com/ubuntu/|${UBUNTU_MIRROR:-mirror://m && echo 'Acquire::http::Timeout "30";\nAcquire::ftp::Timeout "30";\nAcquire::Retries "3";' > /etc/apt/apt.conf.d/99timeout_and_retries \ && apt-get update \ && apt-get -y dist-upgrade \ - && apt-get -y install --no-install-recommends netcat dnsutils less procps iputils-ping \ - python3 python3-kazoo python3-pip \ - curl ca-certificates wget apt-transport-https + && apt-get -y install netcat dnsutils less procps iputils-ping \ + curl ca-certificates wget apt-transport-https \ + && apt-get -y install --no-install-recommends python3 python3-kazoo python3-pip # Install Eclipse Temurin Package RUN mkdir -p /etc/apt/keyrings \ @@ -104,4 +109,5 @@ RUN chmod +x /pulsar/bin/install-pulsar-client.sh RUN /pulsar/bin/install-pulsar-client.sh # The UID must be non-zero. Otherwise, it is arbitrary. No logic should rely on its specific value. 
+RUN useradd ${DEFAULT_USERNAME} -u 10000 -g 0 USER 10000 diff --git a/docker/pulsar/pom.xml b/docker/pulsar/pom.xml index 85d86cff12523..ec8cc5e2b46da 100644 --- a/docker/pulsar/pom.xml +++ b/docker/pulsar/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar + io.streamnative docker-images 3.1.0-SNAPSHOT @@ -29,7 +29,6 @@ pulsar-docker-image Apache Pulsar :: Docker Images :: Pulsar Latest Version pom - ${project.groupId} @@ -46,12 +45,10 @@ - mirror://mirrors.ubuntu.com/mirrors.txt http://security.ubuntu.com/ubuntu/ - git-commit-id-no-git @@ -128,7 +125,6 @@ - docker-push @@ -151,6 +147,5 @@ - diff --git a/docker/pulsar/scripts/apply-config-from-env-with-prefix.py b/docker/pulsar/scripts/apply-config-from-env-with-prefix.py index 58f6c98975005..9943b283a9f89 100755 --- a/docker/pulsar/scripts/apply-config-from-env-with-prefix.py +++ b/docker/pulsar/scripts/apply-config-from-env-with-prefix.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env bash # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -32,83 +32,8 @@ # update if they exist and ignored if they don't. 
############################################################ -import os -import sys - -if len(sys.argv) < 3: - print('Usage: %s [...]' % (sys.argv[0])) - sys.exit(1) - -# Always apply env config to env scripts as well -prefix = sys.argv[1] -conf_files = sys.argv[2:] - -PF_ENV_DEBUG = (os.environ.get('PF_ENV_DEBUG','0') == '1') - -for conf_filename in conf_files: - lines = [] # List of config file lines - keys = {} # Map a key to its line number in the file - - # Load conf file - for line in open(conf_filename): - lines.append(line) - line = line.strip() - if not line or line.startswith('#'): - continue - - try: - k,v = line.split('=', 1) - keys[k] = len(lines) - 1 - except: - if PF_ENV_DEBUG: - print("[%s] skip Processing %s" % (conf_filename, line)) - - # Update values from Env - for k in sorted(os.environ.keys()): - v = os.environ[k].strip() - - # Hide the value in logs if is password. - if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v - - if k.startswith(prefix): - k = k[len(prefix):] - if k in keys: - print('[%s] Applying config %s = %s' % (conf_filename, k, displayValue)) - idx = keys[k] - lines[idx] = '%s=%s\n' % (k, v) - - - # Ensure we have a new-line at the end of the file, to avoid issue - # when appending more lines to the config - lines.append('\n') - - # Add new keys from Env - for k in sorted(os.environ.keys()): - v = os.environ[k] - if not k.startswith(prefix): - continue - - # Hide the value in logs if is password. 
- if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v - - k = k[len(prefix):] - if k not in keys: - print('[%s] Adding config %s = %s' % (conf_filename, k, displayValue)) - lines.append('%s=%s\n' % (k, v)) - else: - print('[%s] Updating config %s = %s' % (conf_filename, k, displayValue)) - lines[keys[k]] = '%s=%s\n' % (k, v) - - - # Store back the updated config in the same file - f = open(conf_filename, 'w') - for line in lines: - f.write(line) - f.close() +# DEPRECATED: Use "apply-config-from-env.py --prefix MY_PREFIX_ conf_file" instead +# this is not a python script, but a bash script. Call apply-config-from-env.py with the prefix argument +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +"${SCRIPT_DIR}/apply-config-from-env.py" --prefix "$1" "${@:2}" diff --git a/docker/pulsar/scripts/apply-config-from-env.py b/docker/pulsar/scripts/apply-config-from-env.py index b8b479fc15b85..da51f05f8be66 100755 --- a/docker/pulsar/scripts/apply-config-from-env.py +++ b/docker/pulsar/scripts/apply-config-from-env.py @@ -25,18 +25,29 @@ ## ./apply-config-from-env file.conf ## -import os, sys - -if len(sys.argv) < 2: - print('Usage: %s' % (sys.argv[0])) +import os, sys, argparse + +parser = argparse.ArgumentParser(description='Pulsar configuration file customizer based on environment variables') +parser.add_argument('--prefix', default='PULSAR_PREFIX_', help='Prefix for environment variables, default is PULSAR_PREFIX_') +parser.add_argument('conf_files', nargs='*', help='Configuration files') +args = parser.parse_args() +if not args.conf_files: + parser.print_help() sys.exit(1) -# Always apply env config to env scripts as well -conf_files = sys.argv[1:] +env_prefix = args.prefix +conf_files = args.conf_files -PF_ENV_PREFIX = 'PULSAR_PREFIX_' PF_ENV_DEBUG = (os.environ.get('PF_ENV_DEBUG','0') == '1') +# List of keys where the value should not be displayed in logs +sensitive_keys = 
["brokerClientAuthenticationParameters", "bookkeeperClientAuthenticationParameters", "tokenSecretKey"] + +def sanitize_display_value(k, v): + if "password" in k.lower() or k in sensitive_keys or (k == "tokenSecretKey" and v.startswith("data:")): + return "********" + return v + for conf_filename in conf_files: lines = [] # List of config file lines keys = {} # Map a key to its line number in the file @@ -47,7 +58,6 @@ line = line.strip() if not line: continue - try: k,v = line.split('=', 1) if k.startswith('#'): @@ -61,37 +71,26 @@ for k in sorted(os.environ.keys()): v = os.environ[k].strip() - # Hide the value in logs if is password. - if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v - - if k.startswith(PF_ENV_PREFIX): - k = k[len(PF_ENV_PREFIX):] if k in keys: + displayValue = sanitize_display_value(k, v) print('[%s] Applying config %s = %s' % (conf_filename, k, displayValue)) idx = keys[k] lines[idx] = '%s=%s\n' % (k, v) - # Ensure we have a new-line at the end of the file, to avoid issue # when appending more lines to the config lines.append('\n') - - # Add new keys from Env + + # Add new keys from Env for k in sorted(os.environ.keys()): - v = os.environ[k] - if not k.startswith(PF_ENV_PREFIX): + if not k.startswith(env_prefix): continue - # Hide the value in logs if is password. 
- if "password" in k.lower(): - displayValue = "********" - else: - displayValue = v + v = os.environ[k].strip() + k = k[len(env_prefix):] + + displayValue = sanitize_display_value(k, v) - k = k[len(PF_ENV_PREFIX):] if k not in keys: print('[%s] Adding config %s = %s' % (conf_filename, k, displayValue)) lines.append('%s=%s\n' % (k, v)) @@ -99,10 +98,8 @@ print('[%s] Updating config %s = %s' % (conf_filename, k, displayValue)) lines[keys[k]] = '%s=%s\n' % (k, v) - # Store back the updated config in the same file f = open(conf_filename, 'w') for line in lines: f.write(line) - f.close() - + f.close() \ No newline at end of file diff --git a/jclouds-shaded/pom.xml b/jclouds-shaded/pom.xml index dfb155c2d5a7d..0e5c03bc7998b 100644 --- a/jclouds-shaded/pom.xml +++ b/jclouds-shaded/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - jclouds-shaded Apache Pulsar :: Jclouds shaded - - org.apache.jclouds jclouds-allblobstore @@ -66,7 +61,6 @@ javax.annotation-api - @@ -82,7 +76,6 @@ true true false - com.google.guava:guava @@ -106,7 +99,6 @@ com.google.errorprone:* - com.google @@ -140,21 +132,20 @@ com.google.errorprone org.apache.pulsar.jcloud.shade.com.google.errorprone - - - org.apache.jclouds:jclouds-core - - - lib/gson*jar - - - + + org.apache.jclouds:jclouds-core + + + lib/gson*jar + + + diff --git a/managed-ledger/pom.xml b/managed-ledger/pom.xml index a8cb560b7b376..dba9f5674f37b 100644 --- a/managed-ledger/pom.xml +++ b/managed-ledger/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- managed-ledger Managed Ledger - org.apache.bookkeeper bookkeeper-server - org.apache.bookkeeper.stats prometheus-metrics-provider - org.apache.bookkeeper.stats codahale-metrics-provider ${bookkeeper.version} + + + amqp-client + com.rabbitmq + + - com.google.protobuf protobuf-java - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-metadata ${project.version} - com.google.guava guava - ${project.groupId} testmocks ${project.version} test - org.apache.zookeeper zookeeper @@ -99,29 +94,25 @@ - io.dropwizard.metrics - metrics-core - test + io.dropwizard.metrics + metrics-core + test - org.xerial.snappy - snappy-java - test + org.xerial.snappy + snappy-java + test - org.awaitility awaitility test - org.slf4j slf4j-api - - @@ -141,7 +132,6 @@ - org.apache.maven.plugins maven-jar-plugin diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java index d1ffdf6d2d763..bc6a1e9a782d6 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java @@ -517,6 +517,10 @@ void markDelete(Position position, Map properties) */ void rewind(); + default void rewind(boolean readCompacted) { + rewind(); + } + /** * Move the cursor to a different read position. 
* diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java index c7dd8ea9129b7..f91d9ec3f5a02 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java @@ -682,8 +682,10 @@ default void skipNonRecoverableLedger(long ledgerId){} /** * Check current inactive ledger (based on {@link ManagedLedgerConfig#getInactiveLedgerRollOverTimeMs()} and * roll over that ledger if inactive. + * + * @return true if ledger is considered for rolling over */ - void checkInactiveLedgerAndRollOver(); + boolean checkInactiveLedgerAndRollOver(); /** * Check if managed ledger should cache backlog reads. diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java index 0c93a5b642cf6..6ee9c2f949243 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java @@ -170,6 +170,7 @@ public int getMinimumRolloverTimeMs() { * the time unit */ public void setMinimumRolloverTime(int minimumRolloverTime, TimeUnit unit) { + checkArgument(minimumRolloverTime >= 0); this.minimumRolloverTimeMs = (int) unit.toMillis(minimumRolloverTime); checkArgument(maximumRolloverTimeMs >= minimumRolloverTimeMs, "Minimum rollover time needs to be less than maximum rollover time"); @@ -195,6 +196,7 @@ public long getMaximumRolloverTimeMs() { * the time unit */ public void setMaximumRolloverTime(int maximumRolloverTime, TimeUnit unit) { + checkArgument(maximumRolloverTime >= 0); this.maximumRolloverTimeMs = unit.toMillis(maximumRolloverTime); checkArgument(maximumRolloverTimeMs >= minimumRolloverTimeMs, "Maximum rollover time needs to be greater than 
minimum rollover time"); @@ -411,7 +413,8 @@ public ManagedLedgerConfig setThrottleMarkDelete(double throttleMarkDelete) { * time unit for retention time */ public ManagedLedgerConfig setRetentionTime(int retentionTime, TimeUnit unit) { - this.retentionTimeMs = unit.toMillis(retentionTime); + checkArgument(retentionTime >= -1, "The retention time should be -1, 0 or value > 0"); + this.retentionTimeMs = retentionTime != -1 ? unit.toMillis(retentionTime) : -1; return this; } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java index b1427bab80b22..e09fd84ea55f2 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactory.java @@ -90,7 +90,7 @@ ManagedLedger open(String name, ManagedLedgerConfig config) * opaque context */ void asyncOpen(String name, ManagedLedgerConfig config, OpenLedgerCallback callback, - Supplier mlOwnershipChecker, Object ctx); + Supplier> mlOwnershipChecker, Object ctx); /** * Open a {@link ReadOnlyCursor} positioned to the earliest entry for the specified managed ledger. 
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java index 8ce3a322c09be..8db8a5714398c 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java @@ -59,6 +59,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.LongStream; import org.apache.bookkeeper.client.AsyncCallback.CloseCallback; import org.apache.bookkeeper.client.AsyncCallback.OpenCallback; import org.apache.bookkeeper.client.BKException; @@ -195,11 +196,11 @@ public class ManagedCursorImpl implements ManagedCursor { position.ackSet = null; return position; }; - private final RangeSetWrapper individualDeletedMessages; + protected final RangeSetWrapper individualDeletedMessages; // Maintain the deletion status for batch messages // (ledgerId, entryId) -> deletion indexes - private final ConcurrentSkipListMap batchDeletedIndexes; + protected final ConcurrentSkipListMap batchDeletedIndexes; private final ReadWriteLock lock = new ReentrantReadWriteLock(); private RateLimiter markDeleteLimiter; @@ -509,7 +510,7 @@ public void operationComplete(ManagedCursorInfo info, Stat stat) { callback.operationComplete(); } else { // Need to proceed and read the last entry in the specified ledger to find out the last position - log.info("[{}] Consumer {} meta-data recover from ledger {}", ledger.getName(), name, + log.info("[{}] Cursor {} meta-data recover from ledger {}", ledger.getName(), name, info.getCursorsLedgerId()); recoverFromLedger(info, callback); } @@ -529,16 +530,16 @@ protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallbac long ledgerId = info.getCursorsLedgerId(); OpenCallback openCallback = (rc, lh, ctx) -> { 
if (log.isInfoEnabled()) { - log.info("[{}] Opened ledger {} for consumer {}. rc={}", ledger.getName(), ledgerId, name, rc); + log.info("[{}] Opened ledger {} for cursor {}. rc={}", ledger.getName(), ledgerId, name, rc); } if (isBkErrorNotRecoverable(rc)) { - log.error("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name, + log.error("[{}] Error opening metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, BKException.getMessage(rc)); // Rewind to oldest entry available initialize(getRollbackPosition(info), Collections.emptyMap(), Collections.emptyMap(), callback); return; } else if (rc != BKException.Code.OK) { - log.warn("[{}] Error opening metadata ledger {} for consumer {}: {}", ledger.getName(), ledgerId, name, + log.warn("[{}] Error opening metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, BKException.getMessage(rc)); callback.operationFailed(new ManagedLedgerException(BKException.getMessage(rc))); return; @@ -548,7 +549,7 @@ protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallbac long lastEntryInLedger = lh.getLastAddConfirmed(); if (lastEntryInLedger < 0) { - log.warn("[{}] Error reading from metadata ledger {} for consumer {}: No entries in ledger", + log.warn("[{}] Error reading from metadata ledger {} for cursor {}: No entries in ledger", ledger.getName(), ledgerId, name); // Rewind to last cursor snapshot available initialize(getRollbackPosition(info), Collections.emptyMap(), cursorProperties, callback); @@ -560,13 +561,13 @@ protected void recoverFromLedger(final ManagedCursorInfo info, final VoidCallbac log.debug("[{}} readComplete rc={} entryId={}", ledger.getName(), rc1, lh1.getLastAddConfirmed()); } if (isBkErrorNotRecoverable(rc1)) { - log.error("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(), + log.error("[{}] Error reading from metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, 
BKException.getMessage(rc1)); // Rewind to oldest entry available initialize(getRollbackPosition(info), Collections.emptyMap(), cursorProperties, callback); return; } else if (rc1 != BKException.Code.OK) { - log.warn("[{}] Error reading from metadata ledger {} for consumer {}: {}", ledger.getName(), + log.warn("[{}] Error reading from metadata ledger {} for cursor {}: {}", ledger.getName(), ledgerId, name, BKException.getMessage(rc1)); callback.operationFailed(createManagedLedgerException(rc1)); @@ -676,7 +677,7 @@ private void recoveredCursor(PositionImpl position, Map properties LedgerHandle recoveredFromCursorLedger) { // if the position was at a ledger that didn't exist (since it will be deleted if it was previously empty), // we need to move to the next existing ledger - if (!ledger.ledgerExists(position.getLedgerId())) { + if (position.getEntryId() == -1L && !ledger.ledgerExists(position.getLedgerId())) { Long nextExistingLedger = ledger.getNextValidLedger(position.getLedgerId()); if (nextExistingLedger == null) { log.info("[{}] [{}] Couldn't find next next valid ledger for recovery {}", ledger.getName(), name, @@ -690,7 +691,7 @@ private void recoveredCursor(PositionImpl position, Map properties position = ledger.getLastPosition(); } log.info("[{}] Cursor {} recovered to position {}", ledger.getName(), name, position); - this.cursorProperties = cursorProperties; + this.cursorProperties = cursorProperties == null ? Collections.emptyMap() : cursorProperties; messagesConsumedCounter = -getNumberOfEntries(Range.openClosed(position, ledger.getLastPosition())); markDeletePosition = position; persistentMarkDeletePosition = position; @@ -786,6 +787,8 @@ public void asyncReadEntriesWithSkip(int numberOfEntriesToRead, long maxSizeByte int numOfEntriesToRead = applyMaxSizeCap(numberOfEntriesToRead, maxSizeBytes); PENDING_READ_OPS_UPDATER.incrementAndGet(this); + // Skip deleted entries. + skipCondition = skipCondition == null ? 
this::isMessageDeleted : skipCondition.or(this::isMessageDeleted); OpReadEntry op = OpReadEntry.create(this, readPosition, numOfEntriesToRead, callback, ctx, maxPosition, skipCondition); ledger.asyncReadEntries(op); @@ -939,6 +942,8 @@ public void asyncReadEntriesWithSkipOrWait(int maxEntries, long maxSizeBytes, Re asyncReadEntriesWithSkip(numberOfEntriesToRead, NO_MAX_SIZE_LIMIT, callback, ctx, maxPosition, skipCondition); } else { + // Skip deleted entries. + skipCondition = skipCondition == null ? this::isMessageDeleted : skipCondition.or(this::isMessageDeleted); OpReadEntry op = OpReadEntry.create(this, readPosition, numberOfEntriesToRead, callback, ctx, maxPosition, skipCondition); @@ -2099,7 +2104,7 @@ void internalMarkDelete(final MarkDeleteEntry mdEntry) { } }); - persistPositionToLedger(cursorLedger, mdEntry, new VoidCallback() { + VoidCallback cb = new VoidCallback() { @Override public void operationComplete() { if (log.isDebugEnabled()) { @@ -2151,7 +2156,18 @@ public void operationFailed(ManagedLedgerException exception) { mdEntry.triggerFailed(exception); } - }); + }; + + if (State.NoLedger.equals(STATE_UPDATER.get(this))) { + if (ledger.isNoMessagesAfterPos(mdEntry.newPosition)) { + persistPositionToMetaStore(mdEntry, cb); + } else { + mdEntry.callback.markDeleteFailed(new ManagedLedgerException("Create new cursor ledger failed"), + mdEntry.ctx); + } + } else { + persistPositionToLedger(cursorLedger, mdEntry, cb); + } } @Override @@ -2442,8 +2458,12 @@ List filterReadEntries(List entries) { @Override public synchronized String toString() { - return MoreObjects.toStringHelper(this).add("ledger", ledger.getName()).add("name", name) - .add("ackPos", markDeletePosition).add("readPos", readPosition).toString(); + return MoreObjects.toStringHelper(this) + .add("ledger", ledger.getName()) + .add("name", name) + .add("ackPos", markDeletePosition) + .add("readPos", readPosition) + .toString(); } @Override @@ -2483,9 +2503,15 @@ public Position 
getPersistentMarkDeletedPosition() { @Override public void rewind() { + rewind(false); + } + + @Override + public void rewind(boolean readCompacted) { lock.writeLock().lock(); try { - PositionImpl newReadPosition = ledger.getNextValidPosition(markDeletePosition); + PositionImpl newReadPosition = + readCompacted ? markDeletePosition.getNext() : ledger.getNextValidPosition(markDeletePosition); PositionImpl oldReadPosition = readPosition; log.info("[{}-{}] Rewind from {} to {}", ledger.getName(), name, oldReadPosition, newReadPosition); @@ -2656,32 +2682,47 @@ public void operationComplete(Void result, Stat stat) { } @Override - public void operationFailed(MetaStoreException e) { - if (e instanceof MetaStoreException.BadVersionException) { + public void operationFailed(MetaStoreException topLevelException) { + if (topLevelException instanceof MetaStoreException.BadVersionException) { log.warn("[{}] Failed to update cursor metadata for {} due to version conflict {}", - ledger.name, name, e.getMessage()); + ledger.name, name, topLevelException.getMessage()); // it means previous owner of the ml might have updated the version incorrectly. So, check // the ownership and refresh the version again. 
- if (ledger.mlOwnershipChecker != null && ledger.mlOwnershipChecker.get()) { - ledger.getStore().asyncGetCursorInfo(ledger.getName(), name, - new MetaStoreCallback() { - @Override - public void operationComplete(ManagedCursorInfo info, Stat stat) { - updateCursorLedgerStat(info, stat); - } - - @Override - public void operationFailed(MetaStoreException e) { - if (log.isDebugEnabled()) { - log.debug( - "[{}] Failed to refresh cursor metadata-version for {} due " - + "to {}", ledger.name, name, e.getMessage()); - } - } - }); + if (ledger.mlOwnershipChecker != null) { + ledger.mlOwnershipChecker.get().whenComplete((hasOwnership, t) -> { + if (t == null && hasOwnership) { + ledger.getStore().asyncGetCursorInfo(ledger.getName(), name, + new MetaStoreCallback<>() { + @Override + public void operationComplete(ManagedCursorInfo info, Stat stat) { + updateCursorLedgerStat(info, stat); + // fail the top level call so that the caller can retry + callback.operationFailed(topLevelException); + } + + @Override + public void operationFailed(MetaStoreException e) { + if (log.isDebugEnabled()) { + log.debug( + "[{}] Failed to refresh cursor metadata-version " + + "for {} due to {}", ledger.name, name, + e.getMessage()); + } + // fail the top level call so that the caller can retry + callback.operationFailed(topLevelException); + } + }); + } else { + // fail the top level call so that the caller can retry + callback.operationFailed(topLevelException); + } + }); + } else { + callback.operationFailed(topLevelException); } + } else { + callback.operationFailed(topLevelException); } - callback.operationFailed(e); } }); } @@ -2740,30 +2781,23 @@ public void skipNonRecoverableLedger(final long ledgerId){ if (ledgerInfo == null) { return; } - lock.writeLock().lock(); log.warn("[{}] [{}] Since the ledger [{}] is lost and the autoSkipNonRecoverableData is true, this ledger will" + " be auto acknowledge in subscription", ledger.getName(), name, ledgerId); - try { - for (int i = 0; i < 
ledgerInfo.getEntries(); i++) { - if (!individualDeletedMessages.contains(ledgerId, i)) { - asyncDelete(PositionImpl.get(ledgerId, i), new AsyncCallbacks.DeleteCallback() { - @Override - public void deleteComplete(Object ctx) { - // ignore. - } + asyncDelete(() -> LongStream.range(0, ledgerInfo.getEntries()) + .mapToObj(i -> (Position) PositionImpl.get(ledgerId, i)).iterator(), + new AsyncCallbacks.DeleteCallback() { + @Override + public void deleteComplete(Object ctx) { + // ignore. + } - @Override - public void deleteFailed(ManagedLedgerException ex, Object ctx) { - // The method internalMarkDelete already handled the failure operation. We only need to - // make sure the memory state is updated. - // If the broker crashed, the non-recoverable ledger will be detected again. - } - }, null); - } - } - } finally { - lock.writeLock().unlock(); - } + @Override + public void deleteFailed(ManagedLedgerException ex, Object ctx) { + // The method internalMarkDelete already handled the failure operation. We only need to + // make sure the memory state is updated. + // If the broker crashed, the non-recoverable ledger will be detected again. + } + }, null); } // ////////////////////////////////////////////////// @@ -2797,16 +2831,15 @@ public void operationComplete() { @Override public void operationFailed(ManagedLedgerException exception) { - log.error("[{}][{}] Metadata ledger creation failed", ledger.getName(), name, exception); + log.error("[{}][{}] Metadata ledger creation failed {}, try to persist the position in the metadata" + + " store.", ledger.getName(), name, exception); synchronized (pendingMarkDeleteOps) { - while (!pendingMarkDeleteOps.isEmpty()) { - MarkDeleteEntry entry = pendingMarkDeleteOps.poll(); - entry.callback.markDeleteFailed(exception, entry.ctx); - } - // At this point we don't have a ledger ready STATE_UPDATER.set(ManagedCursorImpl.this, State.NoLedger); + // Note: if the stat is NoLedger, will persist the mark deleted position to metadata store. 
+ // Before giving up, try to persist the position in the metadata store. + flushPendingMarkDeletes(); } } }); @@ -3058,7 +3091,7 @@ void persistPositionToLedger(final LedgerHandle lh, MarkDeleteEntry mdEntry, fin if (shouldCloseLedger(lh1)) { if (log.isDebugEnabled()) { - log.debug("[{}] Need to create new metadata ledger for consumer {}", ledger.getName(), name); + log.debug("[{}] Need to create new metadata ledger for cursor {}", ledger.getName(), name); } startCreatingNewMetadataLedger(); } @@ -3073,32 +3106,39 @@ void persistPositionToLedger(final LedgerHandle lh, MarkDeleteEntry mdEntry, fin // in the meantime the mark-delete will be queued. STATE_UPDATER.compareAndSet(ManagedCursorImpl.this, State.Open, State.NoLedger); - mbean.persistToLedger(false); - // Before giving up, try to persist the position in the metadata store - persistPositionMetaStore(-1, position, mdEntry.properties, new MetaStoreCallback() { - @Override - public void operationComplete(Void result, Stat stat) { - if (log.isDebugEnabled()) { - log.debug( - "[{}][{}] Updated cursor in meta store after previous failure in ledger at position" - + " {}", ledger.getName(), name, position); - } - mbean.persistToZookeeper(true); - callback.operationComplete(); - } - - @Override - public void operationFailed(MetaStoreException e) { - log.warn("[{}][{}] Failed to update cursor in meta store after previous failure in ledger: {}", - ledger.getName(), name, e.getMessage()); - mbean.persistToZookeeper(false); - callback.operationFailed(createManagedLedgerException(rc)); - } - }, true); + // Before giving up, try to persist the position in the metadata store. 
+ persistPositionToMetaStore(mdEntry, callback); } }, null); } + void persistPositionToMetaStore(MarkDeleteEntry mdEntry, final VoidCallback callback) { + final PositionImpl newPosition = mdEntry.newPosition; + STATE_UPDATER.compareAndSet(ManagedCursorImpl.this, State.Open, State.NoLedger); + mbean.persistToLedger(false); + // Before giving up, try to persist the position in the metadata store + persistPositionMetaStore(-1, newPosition, mdEntry.properties, new MetaStoreCallback() { + @Override + public void operationComplete(Void result, Stat stat) { + if (log.isDebugEnabled()) { + log.debug( + "[{}][{}] Updated cursor in meta store after previous failure in ledger at position" + + " {}", ledger.getName(), name, newPosition); + } + mbean.persistToZookeeper(true); + callback.operationComplete(); + } + + @Override + public void operationFailed(MetaStoreException e) { + log.warn("[{}][{}] Failed to update cursor in meta store after previous failure in ledger: {}", + ledger.getName(), name, e.getMessage()); + mbean.persistToZookeeper(false); + callback.operationFailed(createManagedLedgerException(e)); + } + }, true); + } + boolean shouldCloseLedger(LedgerHandle lh) { long now = clock.millis(); if (ledger.getFactory().isMetadataServiceAvailable() @@ -3136,7 +3176,7 @@ public void operationComplete(Void result, Stat stat) { @Override public void operationFailed(MetaStoreException e) { - log.warn("[{}] Failed to update consumer {}", ledger.getName(), name, e); + log.warn("[{}] Failed to update cursor metadata {}", ledger.getName(), name, e); // it means it failed to switch the newly created ledger so, it should be // deleted to prevent leak deleteLedgerAsync(lh).thenRun(() -> callback.operationFailed(e)); @@ -3473,7 +3513,7 @@ public ManagedCursorMXBean getStats() { return this.mbean; } - void updateReadStats(int readEntriesCount, long readEntriesSize) { + public void updateReadStats(int readEntriesCount, long readEntriesSize) { this.entriesReadCount += readEntriesCount; 
this.entriesReadSize += readEntriesSize; } @@ -3505,7 +3545,7 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { }, null); } - private int applyMaxSizeCap(int maxEntries, long maxSizeBytes) { + public int applyMaxSizeCap(int maxEntries, long maxSizeBytes) { if (maxSizeBytes == NO_MAX_SIZE_LIMIT) { return maxEntries; } @@ -3563,4 +3603,29 @@ public boolean isCacheReadEntry() { public ManagedLedgerConfig getConfig() { return config; } + + /*** + * Create a non-durable cursor and copy the ack stats. + */ + public ManagedCursor duplicateNonDurableCursor(String nonDurableCursorName) throws ManagedLedgerException { + NonDurableCursorImpl newNonDurableCursor = + (NonDurableCursorImpl) ledger.newNonDurableCursor(getMarkDeletedPosition(), nonDurableCursorName); + if (individualDeletedMessages != null) { + this.individualDeletedMessages.forEach(range -> { + newNonDurableCursor.individualDeletedMessages.addOpenClosed( + range.lowerEndpoint().getLedgerId(), + range.lowerEndpoint().getEntryId(), + range.upperEndpoint().getLedgerId(), + range.upperEndpoint().getEntryId()); + return true; + }); + } + if (batchDeletedIndexes != null) { + for (Map.Entry entry : this.batchDeletedIndexes.entrySet()) { + BitSetRecyclable copiedBitSet = BitSetRecyclable.valueOf(entry.getValue()); + newNonDurableCursor.batchDeletedIndexes.put(entry.getKey(), copiedBitSet); + } + } + return newNonDurableCursor; + } } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java index 9107b76c88a28..42a5216b3ce36 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java @@ -330,7 +330,7 @@ public void asyncOpen(String name, OpenLedgerCallback callback, Object ctx) { @Override public void 
asyncOpen(final String name, final ManagedLedgerConfig config, final OpenLedgerCallback callback, - Supplier mlOwnershipChecker, final Object ctx) { + Supplier> mlOwnershipChecker, final Object ctx) { if (closed) { callback.openLedgerFailed(new ManagedLedgerException.ManagedLedgerFactoryClosedException(), ctx); return; @@ -880,7 +880,10 @@ public void asyncDelete(String name, CompletableFuture mlCo // If it's open, delete in the normal way ml.asyncDelete(callback, ctx); }).exceptionally(ex -> { - // If it's failing to get open, just delete from metadata + // If it fails to get open, it will be cleaned by managed ledger opening error handling. + // then retry will go to `future=null` branch. + final Throwable rc = FutureUtil.unwrapCompletionException(ex); + callback.deleteLedgerFailed(getManagedLedgerException(rc), ctx); return null; }); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java index cf3d7142d617e..5a6bc8017b7e0 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryMBeanImpl.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger.impl; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.mledger.ManagedLedgerFactoryMXBean; @@ -41,6 +42,7 @@ public ManagedLedgerFactoryMBeanImpl(ManagedLedgerFactoryImpl factory) throws Ex } public void refreshStats(long period, TimeUnit unit) { + checkArgument(period >= 0); double seconds = unit.toMillis(period) / 1000.0; if (seconds <= 0.0) { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index 10f7948f553cb..72e0b2c9d063c 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -59,7 +59,6 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReference; @@ -232,7 +231,7 @@ public class ManagedLedgerImpl implements ManagedLedger, CreateCallback { private static final Random random = new Random(System.currentTimeMillis()); private long maximumRolloverTimeMs; - protected final Supplier mlOwnershipChecker; + protected final Supplier> mlOwnershipChecker; volatile PositionImpl lastConfirmedEntry; @@ -336,7 +335,7 @@ public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper } public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store, ManagedLedgerConfig config, OrderedScheduler scheduledExecutor, - final String name, final Supplier mlOwnershipChecker) { + final String name, final Supplier> mlOwnershipChecker) { this.factory = factory; this.bookKeeper = bookKeeper; this.config = config; @@ -1239,6 +1238,10 @@ public CompletableFuture getEarliestMessagePublishTimeOfPos(PositionImpl p } PositionImpl nextPos = getNextValidPosition(pos); + if (nextPos.compareTo(lastConfirmedEntry) > 0) { + return CompletableFuture.completedFuture(-1L); + } + asyncReadEntry(nextPos, new ReadEntryCallback() { @Override public void readEntryComplete(Entry entry, Object ctx) { @@ -3159,7 +3162,7 @@ public void asyncOffloadPrefix(Position pos, OffloadCallback callback, Object ct } } - private void 
offloadLoop(CompletableFuture promise, Queue ledgersToOffload, + void offloadLoop(CompletableFuture promise, Queue ledgersToOffload, PositionImpl firstUnoffloaded, Optional firstError) { State currentState = getState(); if (currentState == State.Closed) { @@ -3192,7 +3195,7 @@ private void offloadLoop(CompletableFuture promise, Queue config.getLedgerOffloader().offload(readHandle, uuid, extraMetadata)) .thenCompose((ignore) -> { return Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), - TimeUnit.SECONDS.toHours(1)).limit(10), + TimeUnit.HOURS.toMillis(1)).limit(10), FAIL_ON_CONFLICT, () -> completeLedgerInfoForOffloaded(ledgerId, uuid), scheduledExecutor, name) @@ -3207,6 +3210,7 @@ private void offloadLoop(CompletableFuture promise, Queue= 0) { + return true; + } + if (specifiedLac.getEntryId() < 0) { + // Calculate the meaningful LAC. + PositionImpl actLac = getPreviousPosition(specifiedLac); + if (actLac.getEntryId() >= 0) { + return pos.compareTo(actLac) >= 0; + } else { + // If the actual LAC is still not meaningful. + if (actLac.equals(specifiedLac)) { + // No entries in maneged ledger. + return true; + } else { + // Continue to find a valid LAC. + return isNoMessagesAfterPosForSpecifiedLac(actLac, pos); + } + } + } + return false; + } + /** * Get the entry position that come before the specified position in the message stream, using information from the * ledger list and each ledger entries count. 
@@ -3953,7 +3985,7 @@ public static ManagedLedgerException createManagedLedgerException(Throwable t) { */ protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType, CreateCallback cb, Map metadata) { - AtomicBoolean ledgerCreated = new AtomicBoolean(false); + CompletableFuture ledgerFutureHook = new CompletableFuture<>(); Map finalMetadata = new HashMap<>(); finalMetadata.putAll(ledgerMetadata); finalMetadata.putAll(metadata); @@ -3966,33 +3998,39 @@ protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig conf )); } catch (EnsemblePlacementPolicyConfig.ParseEnsemblePlacementPolicyConfigException e) { log.error("[{}] Serialize the placement configuration failed", name, e); - cb.createComplete(Code.UnexpectedConditionException, null, ledgerCreated); + cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook); return; } } createdLedgerCustomMetadata = finalMetadata; - try { bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(), - config.getAckQuorumSize(), digestType, config.getPassword(), cb, ledgerCreated, finalMetadata); + config.getAckQuorumSize(), digestType, config.getPassword(), cb, ledgerFutureHook, finalMetadata); } catch (Throwable cause) { log.error("[{}] Encountered unexpected error when creating ledger", name, cause); - cb.createComplete(Code.UnexpectedConditionException, null, ledgerCreated); + ledgerFutureHook.completeExceptionally(cause); + cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook); return; } - scheduledExecutor.schedule(() -> { - if (!ledgerCreated.get()) { + + ScheduledFuture timeoutChecker = scheduledExecutor.schedule(() -> { + if (!ledgerFutureHook.isDone() + && ledgerFutureHook.completeExceptionally(new TimeoutException(name + " Create ledger timeout"))) { if (log.isDebugEnabled()) { log.debug("[{}] Timeout creating ledger", name); } - cb.createComplete(BKException.Code.TimeoutException, null, 
ledgerCreated); + cb.createComplete(BKException.Code.TimeoutException, null, ledgerFutureHook); } else { if (log.isDebugEnabled()) { log.debug("[{}] Ledger already created when timeout task is triggered", name); } } }, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS); + + ledgerFutureHook.whenComplete((ignore, ex) -> { + timeoutChecker.cancel(false); + }); } public Clock getClock() { @@ -4001,16 +4039,12 @@ public Clock getClock() { /** * check if ledger-op task is already completed by timeout-task. If completed then delete the created ledger - * - * @param rc - * @param lh - * @param ctx * @return */ protected boolean checkAndCompleteLedgerOpTask(int rc, LedgerHandle lh, Object ctx) { - if (ctx instanceof AtomicBoolean) { + if (ctx instanceof CompletableFuture) { // ledger-creation is already timed out and callback is already completed so, delete this ledger and return. - if (((AtomicBoolean) (ctx)).compareAndSet(false, true)) { + if (((CompletableFuture) ctx).complete(lh)) { return false; } else { if (rc == BKException.Code.OK) { @@ -4369,9 +4403,10 @@ private void cancelScheduledTasks() { } @Override - public void checkInactiveLedgerAndRollOver() { + public boolean checkInactiveLedgerAndRollOver() { long currentTimeMs = System.currentTimeMillis(); - if (inactiveLedgerRollOverTimeMs > 0 && currentTimeMs > (lastAddEntryTimeMs + inactiveLedgerRollOverTimeMs)) { + if (currentLedgerEntries > 0 && inactiveLedgerRollOverTimeMs > 0 && currentTimeMs > (lastAddEntryTimeMs + + inactiveLedgerRollOverTimeMs)) { log.info("[{}] Closing inactive ledger, last-add entry {}", name, lastAddEntryTimeMs); if (STATE_UPDATER.compareAndSet(this, State.LedgerOpened, State.ClosingLedger)) { LedgerHandle currentLedger = this.currentLedger; @@ -4390,10 +4425,13 @@ public void checkInactiveLedgerAndRollOver() { } ledgerClosed(lh); + createLedgerAfterClosed(); // we do not create ledger here, since topic is inactive for a long time. 
}, null); + return true; } } + return false; } @@ -4446,4 +4484,4 @@ public Position getTheSlowestNonDurationReadPosition() { } return theSlowestNonDurableReadPosition; } -} +} \ No newline at end of file diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java index e057dee99538e..7884add95526c 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger.impl; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.mledger.ManagedCursor; @@ -63,6 +64,7 @@ public ManagedLedgerMBeanImpl(ManagedLedgerImpl managedLedger) { } public void refreshStats(long period, TimeUnit unit) { + checkArgument(period >= 0); double seconds = unit.toMillis(period) / 1000.0; if (seconds <= 0.0) { // skip refreshing stats diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java index 9d2829b1707f4..51e56158cad55 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java @@ -138,8 +138,12 @@ public void rewind() { @Override public synchronized String toString() { - return MoreObjects.toStringHelper(this).add("ledger", ledger.getName()).add("ackPos", markDeletePosition) - .add("readPos", readPosition).toString(); + return MoreObjects.toStringHelper(this) + .add("ledger", ledger.getName()) + .add("cursor", getName()) + .add("ackPos", markDeletePosition) + 
.add("readPos", readPosition) + .toString(); } private static final Logger log = LoggerFactory.getLogger(NonDurableCursorImpl.class); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java index 9102339b2904e..1661613f07d7d 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ReadOnlyCursorImpl.java @@ -23,6 +23,7 @@ import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; +import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.ReadOnlyCursor; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.PositionBound; import org.apache.bookkeeper.mledger.proto.MLDataFormats; @@ -70,4 +71,9 @@ public MLDataFormats.ManagedLedgerInfo.LedgerInfo getCurrentLedgerInfo() { public long getNumberOfEntries(Range range) { return this.ledger.getNumberOfEntries(range); } + + @Override + public boolean isMessageDeleted(Position position) { + return false; + } } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java index b33dd87543f77..8b2742d958783 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.AsyncCallback; @@ -50,7 +51,7 @@ public class ShadowManagedLedgerImpl 
extends ManagedLedgerImpl { public ShadowManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store, ManagedLedgerConfig config, OrderedScheduler scheduledExecutor, - String name, final Supplier mlOwnershipChecker) { + String name, final Supplier> mlOwnershipChecker) { super(factory, bookKeeper, store, config, scheduledExecutor, name, mlOwnershipChecker); this.sourceMLName = config.getShadowSourceName(); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java index 550626f76c000..9c9feb2aa7f7c 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/offload/OffloadUtils.java @@ -198,7 +198,7 @@ public static CompletableFuture cleanupOffloaded(long ledgerId, UUID uuid, metadataMap.put("ManagedLedgerName", name); return Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), - TimeUnit.SECONDS.toHours(1)).limit(10), + TimeUnit.HOURS.toMillis(1)).limit(10), Retries.NonFatalPredicate, () -> mlConfig.getLedgerOffloader().deleteOffloaded(ledgerId, uuid, metadataMap), executor, name).whenComplete((ignored, exception) -> { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java index 7599e2cc1874f..d34857e5e5177 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java @@ -27,7 +27,6 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.lang3.tuple.Pair; /** @@ -74,13 +73,18 @@ 
public RangeCache(Weighter weighter, TimestampExtractor timestampE * @return whether the entry was inserted in the cache */ public boolean put(Key key, Value value) { - MutableBoolean flag = new MutableBoolean(); - entries.computeIfAbsent(key, (k) -> { - size.addAndGet(weighter.getSize(value)); - flag.setValue(true); - return value; - }); - return flag.booleanValue(); + // retain value so that it's not released before we put it in the cache and calculate the weight + value.retain(); + try { + if (entries.putIfAbsent(key, value) == null) { + size.addAndGet(weighter.getSize(value)); + return true; + } else { + return false; + } + } finally { + value.release(); + } } public boolean exists(Key key) { @@ -242,7 +246,6 @@ public synchronized Pair clear() { value.release(); } - entries.clear(); size.getAndAdd(-removedSize); return Pair.of(removedCount, removedSize); } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java index 1b1b5534256f9..644f53c3a522d 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java @@ -43,6 +43,7 @@ import java.util.Arrays; import java.util.BitSet; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -65,6 +66,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; import lombok.Cleanup; @@ -72,6 +74,7 @@ import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.BookKeeper.DigestType; import org.apache.bookkeeper.client.LedgerEntry; +import 
org.apache.bookkeeper.client.api.ReadHandle; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCallback; @@ -229,6 +232,97 @@ void readTwice() throws Exception { entries.forEach(Entry::release); } + @Test + void testPersistentMarkDeleteIfCreateCursorLedgerFailed() throws Exception { + final int entryCount = 10; + final String cursorName = "c1"; + final String mlName = "ml_test"; + final ManagedLedgerConfig mlConfig = new ManagedLedgerConfig().setMaxEntriesPerLedger(1); + ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + + ManagedCursor cursor = ml.openCursor("c1"); + Position lastEntry = null; + for (int i = 0; i < 10; i++) { + lastEntry = ml.addEntry(("entry-" + i).getBytes(Encoding)); + } + + // Mock cursor ledger create failed. + bkc.failNow(BKException.Code.NoBookieAvailableException); + + cursor.markDelete(lastEntry); + + // Assert persist mark deleted position to ZK was successful. + PositionImpl slowestReadPosition = ml.getCursors().getSlowestReaderPosition(); + assertTrue(slowestReadPosition.getLedgerId() >= lastEntry.getLedgerId()); + assertTrue(slowestReadPosition.getEntryId() >= lastEntry.getEntryId()); + assertEquals(cursor.getStats().getPersistLedgerSucceed(), 0); + assertTrue(cursor.getStats().getPersistZookeeperSucceed() > 0); + assertEquals(cursor.getPersistentMarkDeletedPosition(), lastEntry); + + // Verify the mark delete position can be recovered properly. + ml.close(); + ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + ManagedCursorImpl cursorRecovered = (ManagedCursorImpl) ml.openCursor(cursorName); + assertEquals(cursorRecovered.getPersistentMarkDeletedPosition(), lastEntry); + + // cleanup. 
+ ml.delete(); + } + + @Test + void testPersistentMarkDeleteIfSwitchCursorLedgerFailed() throws Exception { + final int entryCount = 10; + final String cursorName = "c1"; + final String mlName = "ml_test"; + final ManagedLedgerConfig mlConfig = new ManagedLedgerConfig().setMaxEntriesPerLedger(1); + ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + + final ManagedCursorImpl cursor = (ManagedCursorImpl) ml.openCursor(cursorName); + ArrayList positions = new ArrayList<>(); + for (int i = 0; i < entryCount; i++) { + positions.add(ml.addEntry(("entry-" + i).getBytes(Encoding))); + } + // Trigger the cursor ledger creating. + cursor.markDelete(positions.get(0)); + assertTrue(cursor.getStats().getPersistLedgerSucceed() > 0); + + // Mock cursor ledger write failed. + bkc.addEntryFailAfter(0, BKException.Code.NoBookieAvailableException); + // Trigger a failed writing of the cursor ledger, then wait the stat of cursor to be "NoLedger". + // This time ZK will be written due to a failure to write BK. + cursor.markDelete(positions.get(1)); + Awaitility.await().untilAsserted(() -> { + assertEquals(cursor.getState(), "NoLedger"); + }); + assertTrue(cursor.getStats().getPersistLedgerErrors() > 0); + long persistZookeeperSucceed1 = cursor.getStats().getPersistZookeeperSucceed(); + assertTrue(persistZookeeperSucceed1 > 0); + + // Mock cursor ledger create failed. + bkc.failNow(BKException.Code.NoBookieAvailableException); + // Verify the cursor status will be persistent to ZK even if the cursor ledger creation always fails. + // This time ZK will be written due to catch up. + Position lastEntry = positions.get(entryCount -1); + cursor.markDelete(lastEntry); + long persistZookeeperSucceed2 = cursor.getStats().getPersistZookeeperSucceed(); + assertTrue(persistZookeeperSucceed2 > persistZookeeperSucceed1); + + // Assert persist mark deleted position to ZK was successful. 
+ PositionImpl slowestReadPosition = ml.getCursors().getSlowestReaderPosition(); + assertTrue(slowestReadPosition.getLedgerId() >= lastEntry.getLedgerId()); + assertTrue(slowestReadPosition.getEntryId() >= lastEntry.getEntryId()); + assertEquals(cursor.getPersistentMarkDeletedPosition(), lastEntry); + + // Verify the mark delete position can be recovered properly. + ml.close(); + ml = (ManagedLedgerImpl) factory.open(mlName, mlConfig); + ManagedCursorImpl cursorRecovered = (ManagedCursorImpl) ml.openCursor(cursorName); + assertEquals(cursorRecovered.getPersistentMarkDeletedPosition(), lastEntry); + + // cleanup. + ml.delete(); + } + @Test(timeOut = 20000) void readWithCacheDisabled() throws Exception { ManagedLedgerFactoryConfig config = new ManagedLedgerFactoryConfig(); @@ -675,7 +769,7 @@ void testResetCursor() throws Exception { @Test(timeOut = 20000) void testResetCursor1() throws Exception { ManagedLedger ledger = factory.open("my_test_move_cursor_ledger", - new ManagedLedgerConfig().setMaxEntriesPerLedger(2)); + new ManagedLedgerConfig().setMaxEntriesPerLedger(2)); ManagedCursor cursor = ledger.openCursor("trc1"); PositionImpl actualEarliest = (PositionImpl) ledger.addEntry("dummy-entry-1".getBytes(Encoding)); ledger.addEntry("dummy-entry-2".getBytes(Encoding)); @@ -1421,6 +1515,17 @@ void errorRecoveringCursor2() throws Exception { ledger = factory2.open("my_test_ledger"); ManagedCursor cursor = ledger.openCursor("c1"); Position position = ledger.addEntry("test".getBytes()); + // Make persist zk fail once. 
+ AtomicInteger persistZKTimes = new AtomicInteger(); + metadataStore.failConditional(new MetadataStoreException.BadVersionException("mock ex"), (type, path) -> { + if (FaultInjectionMetadataStore.OperationType.PUT.equals(type) + && path.equals("/managed-ledgers/my_test_ledger/c1")) { + if (persistZKTimes.incrementAndGet() == 1) { + return true; + } + } + return false; + }); try { cursor.markDelete(position); fail("should have failed"); @@ -2184,7 +2289,7 @@ void testFindNewestMatchingEdgeCase1() throws Exception { ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1"); assertNull(c1.findNewestMatching( - entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding)))); + entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding)))); } @Test(timeOut = 20000) @@ -2493,7 +2598,7 @@ public void findEntryComplete(Position position, Object ctx) { @Override public void findEntryFailed(ManagedLedgerException exception, Optional failedReadPosition, - Object ctx) { + Object ctx) { result.exception = exception; counter.countDown(); } @@ -2519,7 +2624,7 @@ public void findEntryFailed(ManagedLedgerException exception, Optional } void internalTestFindNewestMatchingAllEntries(final String name, final int entriesPerLedger, - final int expectedEntryId) throws Exception { + final int expectedEntryId) throws Exception { final String ledgerAndCursorName = name; ManagedLedgerConfig config = new ManagedLedgerConfig(); config.setRetentionSizeInMB(10); @@ -2613,7 +2718,7 @@ void testReplayEntries() throws Exception { assertTrue((Arrays.equals(entries.get(0).getData(), "entry1".getBytes(Encoding)) && Arrays.equals(entries.get(1).getData(), "entry3".getBytes(Encoding))) || (Arrays.equals(entries.get(0).getData(), "entry3".getBytes(Encoding)) - && Arrays.equals(entries.get(1).getData(), "entry1".getBytes(Encoding)))); + && Arrays.equals(entries.get(1).getData(), "entry1".getBytes(Encoding)))); entries.forEach(Entry::release); // 3. 
Fail on reading non-existing position @@ -3040,7 +3145,7 @@ public void operationFailed(ManagedLedgerException exception) { try { bkc.openLedgerNoRecovery(ledgerId, DigestType.fromApiDigestType(mlConfig.getDigestType()), - mlConfig.getPassword()); + mlConfig.getPassword()); fail("ledger should have deleted due to update-cursor failure"); } catch (BKException e) { // ok @@ -3659,17 +3764,17 @@ private void deleteBatchIndex(ManagedCursor cursor, Position position, int batch pos.ackSet = bitSet.toLongArray(); cursor.asyncDelete(pos, - new DeleteCallback() { - @Override - public void deleteComplete(Object ctx) { - latch.countDown(); - } + new DeleteCallback() { + @Override + public void deleteComplete(Object ctx) { + latch.countDown(); + } - @Override - public void deleteFailed(ManagedLedgerException exception, Object ctx) { - latch.countDown(); - } - }, null); + @Override + public void deleteFailed(ManagedLedgerException exception, Object ctx) { + latch.countDown(); + } + }, null); latch.await(); pos.ackSet = null; } @@ -4382,5 +4487,202 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { ledger.close(); } + + @Test + public void testReadEntriesWithSkipDeletedEntries() throws Exception { + @Cleanup + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testReadEntriesWithSkipDeletedEntries"); + ledger = Mockito.spy(ledger); + List actualReadEntryIds = new ArrayList<>(); + Mockito.doAnswer(inv -> { + long start = inv.getArgument(1); + long end = inv.getArgument(2); + for (long i = start; i <= end; i++) { + actualReadEntryIds.add(i); + } + return inv.callRealMethod(); + }) + .when(ledger) + .asyncReadEntry(Mockito.any(ReadHandle.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.any()); + @Cleanup + ManagedCursor cursor = ledger.openCursor("c"); + + int entries = 20; + Position maxReadPosition = null; + Map map = new HashMap<>(); + for (int i = 0; i < entries; i++) { + maxReadPosition = ledger.addEntry(new 
byte[1024]); + map.put(i, maxReadPosition); + } + + + Set deletedPositions = new HashSet<>(); + deletedPositions.add(map.get(1)); + deletedPositions.add(map.get(4)); + deletedPositions.add(map.get(5)); + deletedPositions.add(map.get(8)); + deletedPositions.add(map.get(9)); + deletedPositions.add(map.get(10)); + deletedPositions.add(map.get(15)); + deletedPositions.add(map.get(17)); + deletedPositions.add(map.get(19)); + cursor.delete(deletedPositions); + + CompletableFuture f0 = new CompletableFuture<>(); + List readEntries = new ArrayList<>(); + cursor.asyncReadEntries(5, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f0.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f0.completeExceptionally(exception); + } + }, null, PositionImpl.get(maxReadPosition.getLedgerId(), maxReadPosition.getEntryId()).getNext()); + + f0.get(); + + CompletableFuture f1 = new CompletableFuture<>(); + cursor.asyncReadEntries(5, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f1.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f1.completeExceptionally(exception); + } + }, null, PositionImpl.get(maxReadPosition.getLedgerId(), maxReadPosition.getEntryId()).getNext()); + + + f1.get(); + CompletableFuture f2 = new CompletableFuture<>(); + cursor.asyncReadEntries(100, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f2.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f2.completeExceptionally(exception); + } + }, null, PositionImpl.get(maxReadPosition.getLedgerId(), maxReadPosition.getEntryId()).getNext()); + + f2.get(); + + 
Position cursorReadPosition = cursor.getReadPosition(); + Position expectReadPosition = maxReadPosition.getNext(); + assertTrue(cursorReadPosition.getLedgerId() == expectReadPosition.getLedgerId() + && cursorReadPosition.getEntryId() == expectReadPosition.getEntryId()); + + assertEquals(readEntries.size(), actualReadEntryIds.size()); + assertEquals(entries - deletedPositions.size(), actualReadEntryIds.size()); + for (Entry entry : readEntries) { + long entryId = entry.getEntryId(); + assertTrue(actualReadEntryIds.contains(entryId)); + } + } + + + @Test + public void testReadEntriesWithSkipDeletedEntriesAndWithSkipConditions() throws Exception { + @Cleanup + ManagedLedgerImpl ledger = (ManagedLedgerImpl) + factory.open("testReadEntriesWithSkipDeletedEntriesAndWithSkipConditions"); + ledger = Mockito.spy(ledger); + + List actualReadEntryIds = new ArrayList<>(); + Mockito.doAnswer(inv -> { + long start = inv.getArgument(1); + long end = inv.getArgument(2); + for (long i = start; i <= end; i++) { + actualReadEntryIds.add(i); + } + return inv.callRealMethod(); + }) + .when(ledger) + .asyncReadEntry(Mockito.any(ReadHandle.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.any()); + @Cleanup + ManagedCursor cursor = ledger.openCursor("c"); + + int entries = 20; + Position maxReadPosition0 = null; + Map map = new HashMap<>(); + for (int i = 0; i < entries; i++) { + maxReadPosition0 = ledger.addEntry(new byte[1024]); + map.put(i, maxReadPosition0); + } + + PositionImpl maxReadPosition = + PositionImpl.get(maxReadPosition0.getLedgerId(), maxReadPosition0.getEntryId()).getNext(); + + Set deletedPositions = new HashSet<>(); + deletedPositions.add(map.get(1)); + deletedPositions.add(map.get(3)); + deletedPositions.add(map.get(5)); + cursor.delete(deletedPositions); + + Set skippedPositions = new HashSet<>(); + skippedPositions.add(map.get(6).getEntryId()); + skippedPositions.add(map.get(7).getEntryId()); + skippedPositions.add(map.get(8).getEntryId()); + 
skippedPositions.add(map.get(11).getEntryId()); + skippedPositions.add(map.get(15).getEntryId()); + skippedPositions.add(map.get(16).getEntryId()); + + Predicate skipCondition = position -> skippedPositions.contains(position.getEntryId()); + List readEntries = new ArrayList<>(); + + CompletableFuture f0 = new CompletableFuture<>(); + cursor.asyncReadEntriesWithSkip(10, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f0.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f0.completeExceptionally(exception); + } + }, null, maxReadPosition, skipCondition); + + f0.get(); + CompletableFuture f1 = new CompletableFuture<>(); + cursor.asyncReadEntriesWithSkip(100, -1L, new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntries.addAll(entries); + f1.complete(null); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + f1.completeExceptionally(exception); + } + }, null, maxReadPosition, skipCondition); + f1.get(); + + + assertEquals(actualReadEntryIds.size(), readEntries.size()); + assertEquals(entries - deletedPositions.size() - skippedPositions.size(), actualReadEntryIds.size()); + for (Entry entry : readEntries) { + long entryId = entry.getEntryId(); + assertTrue(actualReadEntryIds.contains(entryId)); + } + + Position cursorReadPosition = cursor.getReadPosition(); + Position expectReadPosition = maxReadPosition; + assertTrue(cursorReadPosition.getLedgerId() == expectReadPosition.getLedgerId() + && cursorReadPosition.getEntryId() == expectReadPosition.getEntryId()); + } + private static final Logger log = LoggerFactory.getLogger(ManagedCursorTest.class); } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java 
b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java index 70ddbb9998fd8..34f65dfd00ee8 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java @@ -21,7 +21,9 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; @@ -55,17 +57,21 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Optional; +import java.util.Queue; import java.util.Set; import java.util.UUID; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.FutureTask; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -90,6 +96,8 @@ import org.apache.bookkeeper.client.api.LedgerEntries; import org.apache.bookkeeper.client.api.LedgerMetadata; import org.apache.bookkeeper.client.api.ReadHandle; +import org.apache.bookkeeper.common.util.BoundedScheduledExecutorService; +import org.apache.bookkeeper.common.util.OrderedScheduler; import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.mledger.AsyncCallbacks; import 
org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; @@ -124,6 +132,7 @@ import org.apache.bookkeeper.mledger.util.Futures; import org.apache.bookkeeper.test.MockedBookKeeperTestCase; import org.apache.commons.lang3.exception.ExceptionUtils; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition; @@ -135,6 +144,7 @@ import org.apache.pulsar.metadata.api.extended.SessionEvent; import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.DataProvider; @@ -3085,9 +3095,9 @@ public void testManagedLedgerWithCreateLedgerTimeOut() throws Exception { latch.await(config.getMetadataOperationsTimeoutSeconds() + 2, TimeUnit.SECONDS); assertEquals(response.get(), BKException.Code.TimeoutException); - assertTrue(ctxHolder.get() instanceof AtomicBoolean); - AtomicBoolean ledgerCreated = (AtomicBoolean) ctxHolder.get(); - assertFalse(ledgerCreated.get()); + assertTrue(ctxHolder.get() instanceof CompletableFuture); + CompletableFuture ledgerCreateHook = (CompletableFuture) ctxHolder.get(); + assertTrue(ledgerCreateHook.isCompletedExceptionally()); ledger.close(); } @@ -3389,7 +3399,7 @@ public void openCursorFailed(ManagedLedgerException exception, Object ctx) { @Override public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { } - }, checkOwnershipFlag ? () -> true : null, null); + }, checkOwnershipFlag ? 
() -> CompletableFuture.completedFuture(true) : null, null); latch.await(); } @@ -3888,6 +3898,30 @@ public void testInactiveLedgerRollOver() throws Exception { factory.shutdown(); } + @Test + public void testDontRollOverEmptyInactiveLedgers() throws Exception { + int inactiveLedgerRollOverTimeMs = 5; + ManagedLedgerFactoryConfig factoryConf = new ManagedLedgerFactoryConfig(); + @Cleanup("shutdown") + ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setInactiveLedgerRollOverTime(inactiveLedgerRollOverTimeMs, TimeUnit.MILLISECONDS); + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("rollover_inactive", config); + ManagedCursor cursor = ledger.openCursor("c1"); + + long ledgerId = ledger.currentLedger.getId(); + + Thread.sleep(inactiveLedgerRollOverTimeMs * 5); + ledger.checkInactiveLedgerAndRollOver(); + + Thread.sleep(inactiveLedgerRollOverTimeMs * 5); + ledger.checkInactiveLedgerAndRollOver(); + + assertEquals(ledger.currentLedger.getId(), ledgerId); + + ledger.close(); + } + @Test public void testOffloadTaskCancelled() throws Exception { ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc); @@ -3968,6 +4002,70 @@ public void testGetEnsemblesAsync() throws Exception { Assert.assertFalse(managedLedger.ledgerCache.containsKey(lastLedger)); } + @Test + public void testIsNoMessagesAfterPos() throws Exception { + final byte[] data = new byte[]{1,2,3}; + final String cursorName = "c1"; + final String mlName = UUID.randomUUID().toString().replaceAll("-", ""); + final ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName); + final ManagedCursor managedCursor = ml.openCursor(cursorName); + + // One ledger. 
+ PositionImpl p1 = (PositionImpl) ml.addEntry(data); + PositionImpl p2 = (PositionImpl) ml.addEntry(data); + PositionImpl p3 = (PositionImpl) ml.addEntry(data); + assertFalse(ml.isNoMessagesAfterPos(p1)); + assertFalse(ml.isNoMessagesAfterPos(p2)); + assertTrue(ml.isNoMessagesAfterPos(p3)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p3.getLedgerId(), p3.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p3.getLedgerId() + 1, -1))); + + // More than one ledger. + ml.ledgerClosed(ml.currentLedger); + PositionImpl p4 = (PositionImpl) ml.addEntry(data); + PositionImpl p5 = (PositionImpl) ml.addEntry(data); + PositionImpl p6 = (PositionImpl) ml.addEntry(data); + assertFalse(ml.isNoMessagesAfterPos(p1)); + assertFalse(ml.isNoMessagesAfterPos(p2)); + assertFalse(ml.isNoMessagesAfterPos(p3)); + assertFalse(ml.isNoMessagesAfterPos(p4)); + assertFalse(ml.isNoMessagesAfterPos(p5)); + assertTrue(ml.isNoMessagesAfterPos(p6)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId(), p6.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId() + 1, -1))); + + // Switch ledger and make the entry id of Last confirmed entry is -1; + ml.ledgerClosed(ml.currentLedger); + ml.createLedgerAfterClosed(); + Awaitility.await().untilAsserted(() -> { + assertEquals(ml.currentLedgerEntries, 0); + }); + ml.lastConfirmedEntry = PositionImpl.get(ml.currentLedger.getId(), -1); + assertFalse(ml.isNoMessagesAfterPos(p5)); + assertTrue(ml.isNoMessagesAfterPos(p6)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId(), p6.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId() + 1, -1))); + + // Trim ledgers to make there is no entries in ML. 
+ ml.deleteCursor(cursorName); + CompletableFuture future = new CompletableFuture<>(); + ml.trimConsumedLedgersInBackground(true, future); + future.get(); + assertEquals(ml.ledgers.size(), 1); + assertEquals(ml.lastConfirmedEntry.getEntryId(), -1); + assertTrue(ml.isNoMessagesAfterPos(p1)); + assertTrue(ml.isNoMessagesAfterPos(p2)); + assertTrue(ml.isNoMessagesAfterPos(p3)); + assertTrue(ml.isNoMessagesAfterPos(p4)); + assertTrue(ml.isNoMessagesAfterPos(p5)); + assertTrue(ml.isNoMessagesAfterPos(p6)); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId(), p6.getEntryId() + 1))); + assertTrue(ml.isNoMessagesAfterPos(PositionImpl.get(p6.getLedgerId() + 1, -1))); + + // cleanup. + ml.close(); + } + @Test public void testGetEstimatedBacklogSize() throws Exception { ManagedLedgerConfig config = new ManagedLedgerConfig(); @@ -4010,4 +4108,127 @@ public void operationFailed(MetaStoreException e) { }); future.join(); } + + @Test + public void testNonDurableCursorCreateForInactiveLedger() throws Exception { + String mlName = "testLedgerInfoMetaCorrectIfAddEntryTimeOut"; + BookKeeper spyBookKeeper = spy(bkc); + ManagedLedgerFactoryImpl factory = new ManagedLedgerFactoryImpl(metadataStore, spyBookKeeper); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setInactiveLedgerRollOverTime(10, TimeUnit.MILLISECONDS); + ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open(mlName, config); + ml.addEntry("entry".getBytes(UTF_8)); + + MutableBoolean isRolledOver = new MutableBoolean(false); + retryStrategically((test) -> { + if (isRolledOver.booleanValue()) { + return true; + } + isRolledOver.setValue(ml.checkInactiveLedgerAndRollOver()); + return isRolledOver.booleanValue(); + }, 5, 1000); + assertTrue(isRolledOver.booleanValue()); + + Position Position = new PositionImpl(-1L, -1L); + assertNotNull(ml.newNonDurableCursor(Position)); + } + + /*** + * When a ML tries to create a ledger, it will create a delay task to check if the ledger create 
request is timeout. + * But we should guarantee that the delay task should be canceled after the ledger create request responded. + */ + @Test + public void testNoOrphanScheduledTasksAfterCloseML() throws Exception { + String mlName = UUID.randomUUID().toString(); + ManagedLedgerFactoryImpl factory = new ManagedLedgerFactoryImpl(metadataStore, bkc); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMetadataOperationsTimeoutSeconds(3600); + + // Calculate pending task count. + long pendingTaskCountBefore = calculatePendingTaskCount(factory.getScheduledExecutor()); + // Trigger create & close ML 1000 times. + for (int i = 0; i < 1000; i++) { + ManagedLedger ml = factory.open(mlName, config); + ml.close(); + } + // Verify there is no orphan scheduled task. + long pendingTaskCountAfter = calculatePendingTaskCount(factory.getScheduledExecutor()); + // Maybe there are other components also appended scheduled tasks, so leave 100 tasks to avoid flaky. + assertTrue(pendingTaskCountAfter - pendingTaskCountBefore < 100); + } + + /** + * Calculate how many pending tasks in {@link OrderedScheduler} + */ + private long calculatePendingTaskCount(OrderedScheduler orderedScheduler) { + ExecutorService[] threads = WhiteboxImpl.getInternalState(orderedScheduler, "threads"); + long taskCounter = 0; + for (ExecutorService thread : threads) { + BoundedScheduledExecutorService boundedScheduledExecutorService = + WhiteboxImpl.getInternalState(thread, "delegate"); + BlockingQueue queue = WhiteboxImpl.getInternalState(boundedScheduledExecutorService, "queue"); + for (Runnable r : queue) { + if (r instanceof FutureTask) { + FutureTask futureTask = (FutureTask) r; + if (!futureTask.isCancelled() && !futureTask.isDone()) { + taskCounter++; + } + } else { + taskCounter++; + } + } + } + return taskCounter; + } + + @Test + public void testNoCleanupOffloadLedgerWhenMetadataExceptionHappens() throws Exception { + ManagedLedgerConfig config = spy(new ManagedLedgerConfig()); + 
ManagedLedgerImpl ml = spy((ManagedLedgerImpl) factory.open("testNoCleanupOffloadLedger", config)); + + // mock the ledger offloader + LedgerOffloader ledgerOffloader = mock(NullLedgerOffloader.class); + when(config.getLedgerOffloader()).thenReturn(ledgerOffloader); + when(ledgerOffloader.getOffloadDriverName()).thenReturn("mock"); + + // There will have two put call to the metadata store, the first time is prepare the offload. + // And the second is the complete the offload. This case is testing when completing the offload, + // the metadata store meets an exception. + AtomicInteger metadataPutCallCount = new AtomicInteger(0); + metadataStore.failConditional(new MetadataStoreException("mock completion error"), + (key, value) -> key.equals(FaultInjectionMetadataStore.OperationType.PUT) && + metadataPutCallCount.incrementAndGet() == 2); + + // prepare the arguments for the offloadLoop method + CompletableFuture future = new CompletableFuture<>(); + Queue ledgersToOffload = new LinkedList<>(); + LedgerInfo ledgerInfo = LedgerInfo.getDefaultInstance().toBuilder().setLedgerId(1).setEntries(10).build(); + ledgersToOffload.add(ledgerInfo); + PositionImpl firstUnoffloaded = new PositionImpl(1, 0); + Optional firstError = Optional.empty(); + + // mock the read handle to make the offload successful + CompletableFuture readHandle = new CompletableFuture<>(); + readHandle.complete(mock(ReadHandle.class)); + doReturn(readHandle).when(ml).getLedgerHandle(eq(ledgerInfo.getLedgerId())); + when(ledgerOffloader.offload(any(), any(), anyMap())).thenReturn(CompletableFuture.completedFuture(null)); + + ml.ledgers.put(ledgerInfo.getLedgerId(), ledgerInfo); + + // do the offload + ml.offloadLoop(future, ledgersToOffload, firstUnoffloaded, firstError); + + // waiting for the offload complete + try { + future.join(); + fail("The offload should fail"); + } catch (Exception e) { + // the offload should fail + assertTrue(e.getCause().getMessage().contains("mock completion error")); + } + + 
// the ledger deletion shouldn't happen + verify(ledgerOffloader, times(0)) + .deleteOffloaded(eq(ledgerInfo.getLedgerId()), any(), anyMap()); + } } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java index 1ad3f5f8de631..1e1f7df0a46d5 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java @@ -80,7 +80,8 @@ void readFromEmptyLedger() throws Exception { entries.forEach(Entry::release); // Test string representation - assertEquals(c1.toString(), "NonDurableCursorImpl{ledger=my_test_ledger, ackPos=3:-1, readPos=3:1}"); + assertEquals(c1.toString(), "NonDurableCursorImpl{ledger=my_test_ledger, cursor=" + + c1.getName() + ", ackPos=3:-1, readPos=3:1}"); } @Test(timeOut = 20000) diff --git a/pom.xml b/pom.xml index 7e3538dc6a154..f848198de531c 100644 --- a/pom.xml +++ b/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 pom @@ -28,30 +27,24 @@ apache 29 - - org.apache.pulsar + io.streamnative pulsar - 3.1.0-SNAPSHOT - Pulsar Pulsar is a distributed pub-sub messaging platform with a very flexible messaging model and an intuitive client API. https://github.com/apache/pulsar - Apache Software Foundation https://www.apache.org/ 2017 - Apache Pulsar developers https://pulsar.apache.org/ - Apache License, Version 2.0 @@ -59,47 +52,38 @@ flexible messaging model and an intuitive client API. 
repo - - https://github.com/apache/pulsar - scm:git:https://github.com/apache/pulsar.git - scm:git:ssh://git@github.com:apache/pulsar.git + https://github.com/streamnative/pulsar + scm:git:https://github.com/streamnative/pulsar.git + scm:git:ssh://git@github.com:streamnative/pulsar.git - GitHub Actions https://github.com/apache/pulsar/actions - Github https://github.com/apache/pulsar/issues - 17 17 ${maven.compiler.target} 8 - - 3.2.0 - + 3.4.0 **/Test*.java,**/*Test.java,**/*Tests.java,**/*TestCase.java quarantine - UTF-8 UTF-8 - 2023-05-03T02:53:27Z + 2024-03-04T21:07:36Z true - - - + --add-opens java.base/jdk.internal.loader=ALL-UNNAMED @@ -116,7 +100,7 @@ flexible messaging model and an intuitive client API. false 1 true - + false ${project.build.directory} @@ -129,20 +113,20 @@ flexible messaging model and an intuitive client API. false package package - - 1.21 - - 4.16.2 - 3.8.1 + 1.26.0 + 4.16.4 + 3.9.1 1.5.0 1.10.0 - 1.1.10.1 - 4.1.12.1 + 1.1.10.5 + + 4.1.12.1 + 5.1.0 - 4.1.94.Final + 4.1.100.Final 0.0.21.Final - 9.4.51.v20230217 + 9.4.54.v20240208 2.5.2 2.34 1.10.50 @@ -154,19 +138,21 @@ flexible messaging model and an intuitive client API. 2.18.0 1.75 1.0.6 - 1.0.2.3 + 1.0.2.4 2.14.2 0.10.2 1.6.2 8.37 - 0.42.1 + 0.43.3 true 0.5.0 + 1.14.12 + 1.17 3.19.6 ${protobuf3.version} - 1.45.1 + 1.55.3 1.41.0 - 0.19.0 + 0.26.0 ${grpc.version} 2.8.9 1.2.1 @@ -175,9 +161,9 @@ flexible messaging model and an intuitive client API. 3.11.2 4.4.20 3.4.0 - 5.5.3 + 5.18.0 1.12.262 - 1.10.2 + 1.11.3 2.10.10 2.5.0 5.1.0 @@ -223,7 +209,7 @@ flexible messaging model and an intuitive client API. 0.9.1 2.1.0 3.24.2 - 1.18.26 + 1.18.30 1.3.2 2.3.1 1.2.0 @@ -234,7 +220,7 @@ flexible messaging model and an intuitive client API. 18.0.0 4.9.3 - 2.8.0 + 3.4.0 1.8.20 1.0 @@ -249,11 +235,9 @@ flexible messaging model and an intuitive client API. 3.4.3 1.5.2-3 2.0.6 - 1.18.3 2.2 - 3.3.0 1.1.1 @@ -266,7 +250,6 @@ flexible messaging model and an intuitive client API. 
1.5.4 5.4.0 2.33.2 - 0.6.1 3.0.0 @@ -296,24 +279,20 @@ flexible messaging model and an intuitive client API. 0.1.4 1.3 0.4 - 8.2.1 + 9.0.7 0.9.44 1.6.1 6.4.0 - rename-netty-native-libs.sh - - org.jline jline ${jline3.version} - org.asynchttpclient async-http-client @@ -329,45 +308,39 @@ flexible messaging model and an intuitive client API. - org.testng testng ${testng.version} - - org.yaml - * - + + org.yaml + * + - org.hamcrest hamcrest ${hamcrest.version} test - org.awaitility awaitility ${awaitility.version} test - org.mockito mockito-core ${mockito.version} - org.mockito mockito-inline ${mockito.version} - org.apache.zookeeper zookeeper @@ -406,6 +379,12 @@ flexible messaging model and an intuitive client API. io.dropwizard.metrics metrics-graphite ${dropwizardmetrics.version} + + + com.rabbitmq + amqp-client + + io.dropwizard.metrics @@ -437,7 +416,6 @@ flexible messaging model and an intuitive client API. - org.apache.bookkeeper bookkeeper-server @@ -466,13 +444,11 @@ flexible messaging model and an intuitive client API. - org.apache.bookkeeper cpu-affinity ${bookkeeper.version} - io.vertx vertx-core @@ -483,38 +459,33 @@ flexible messaging model and an intuitive client API. vertx-web ${vertx.version} - - org.apache.curator - curator-recipes - ${curator.version} - - - org.apache.zookeeper - * - - + org.apache.curator + curator-recipes + ${curator.version} + + + org.apache.zookeeper + * + + - org.apache.bookkeeper bookkeeper-common-allocator ${bookkeeper.version} - org.apache.bookkeeper bookkeeper-tools-framework ${bookkeeper.version} - org.reflections reflections ${reflections.version} - org.apache.bookkeeper @@ -541,9 +512,12 @@ flexible messaging model and an intuitive client API. com.squareup.okio okio + + jose4j + org.bitbucket.b_c + - org.apache.bookkeeper @@ -576,19 +550,16 @@ flexible messaging model and an intuitive client API. 
- org.apache.bookkeeper bookkeeper-common ${bookkeeper.version} - org.apache.bookkeeper.stats bookkeeper-stats-api ${bookkeeper.version} - org.apache.bookkeeper.stats datasketches-metrics-provider @@ -600,37 +571,31 @@ flexible messaging model and an intuitive client API. - org.apache.bookkeeper.stats prometheus-metrics-provider ${bookkeeper.version} - org.rocksdb rocksdbjni ${rocksdb.version} - org.eclipse.jetty jetty-server ${jetty.version} - org.eclipse.jetty jetty-alpn-conscrypt-server ${jetty.version} - org.conscrypt conscrypt-openjdk-uber ${conscrypt.version} - org.eclipse.jetty jetty-bom @@ -638,7 +603,6 @@ flexible messaging model and an intuitive client API. pom import - io.netty netty-bom @@ -646,7 +610,6 @@ flexible messaging model and an intuitive client API. pom import - io.netty.incubator netty-incubator-transport-classes-io_uring @@ -669,79 +632,66 @@ flexible messaging model and an intuitive client API. ${netty-iouring.version} linux-aarch_64 - com.beust jcommander ${jcommander.version} - com.google.guava guava ${guava.version} - com.google.inject guice ${guice.version} - com.google.inject.extensions guice-assistedinject ${guice.version} - org.apache.commons commons-lang3 ${commons-lang3.version} - org.apache.commons commons-compress ${commons-compress.version} - commons-configuration commons-configuration ${commons-configuration.version} - commons-io commons-io ${commons-io.version} - org.apache.commons commons-text ${commons-text.version} - org.slf4j slf4j-api ${slf4j.version} - org.slf4j slf4j-simple ${slf4j.version} - org.slf4j jcl-over-slf4j ${slf4j.version} - org.apache.logging.log4j log4j-bom @@ -749,49 +699,41 @@ flexible messaging model and an intuitive client API. 
pom import - commons-codec commons-codec ${commons-codec.version} - org.glassfish.jersey.core jersey-server ${jersey.version} - org.glassfish.jersey.core jersey-client ${jersey.version} - org.glassfish.jersey.inject jersey-hk2 ${jersey.version} - org.glassfish.jersey.containers jersey-container-servlet-core ${jersey.version} - org.glassfish.jersey.containers jersey-container-servlet ${jersey.version} - javax.ws.rs javax.ws.rs-api ${javax.ws.rs-api.version} - org.glassfish.jersey.media jersey-media-json-jackson @@ -803,19 +745,16 @@ flexible messaging model and an intuitive client API. - org.glassfish.jersey.media jersey-media-multipart ${jersey.version} - net.java.dev.jna jna ${jna.version} - com.github.docker-java docker-java-core @@ -837,7 +776,6 @@ flexible messaging model and an intuitive client API. docker-java-transport-zerodep ${docker-java.version} - com.fasterxml.jackson jackson-bom @@ -845,56 +783,46 @@ flexible messaging model and an intuitive client API. pom import - org.codehaus.jettison jettison ${jettison.version} - com.fasterxml.woodstox woodstox-core ${woodstox.version} - - org.hdrhistogram HdrHistogram ${hdrHistogram.version} - io.swagger swagger-core ${swagger.version} - io.swagger swagger-annotations ${swagger.version} - javax.servlet javax.servlet-api ${javax.servlet-api} - com.github.ben-manes.caffeine caffeine ${caffeine.version} - org.bouncycastle bcpkix-jdk18on ${bouncycastle.version} - com.cronutils cron-utils @@ -906,19 +834,16 @@ flexible messaging model and an intuitive client API. - com.yahoo.athenz athenz-zts-java-client-core ${athenz.version} - com.yahoo.athenz athenz-zpe-java-client ${athenz.version} - com.yahoo.athenz athenz-cert-refresher @@ -930,7 +855,6 @@ flexible messaging model and an intuitive client API. - com.yahoo.athenz athenz-auth-core @@ -942,67 +866,56 @@ flexible messaging model and an intuitive client API. 
- com.github.zafarkhaja java-semver ${java-semver.version} - io.prometheus simpleclient ${prometheus.version} - io.prometheus simpleclient_hotspot ${prometheus.version} - io.prometheus simpleclient_log4j2 ${prometheus.version} - io.prometheus simpleclient_servlet ${prometheus.version} - io.prometheus simpleclient_jetty ${prometheus.version} - io.prometheus simpleclient_caffeine ${prometheus.version} - com.carrotsearch hppc ${hppc.version} - io.etcd jetcd-core ${jetcd.version} - io.etcd jetcd-test ${jetcd.version} - org.apache.spark spark-streaming_2.10 @@ -1030,7 +943,6 @@ flexible messaging model and an intuitive client API. - io.jsonwebtoken jjwt-api @@ -1046,13 +958,21 @@ flexible messaging model and an intuitive client API. jjwt-jackson ${jsonwebtoken.version} - net.jodah typetools ${typetools.version} - + + net.bytebuddy + byte-buddy + ${byte-buddy.version} + + + org.zeroturnaround + zt-zip + ${zt-zip.version} + io.grpc grpc-bom @@ -1060,7 +980,6 @@ flexible messaging model and an intuitive client API. pom import - io.grpc grpc-all @@ -1084,7 +1003,6 @@ flexible messaging model and an intuitive client API. - io.grpc grpc-xds @@ -1096,25 +1014,21 @@ flexible messaging model and an intuitive client API. - com.google.http-client google-http-client ${google-http-client.version} - com.google.http-client google-http-client-jackson2 ${google-http-client.version} - com.google.http-client google-http-client-gson ${google-http-client.version} - io.perfmark perfmark-api @@ -1127,7 +1041,6 @@ flexible messaging model and an intuitive client API. - com.google.protobuf protobuf-bom @@ -1135,19 +1048,16 @@ flexible messaging model and an intuitive client API. pom import - com.google.code.gson gson ${gson.version} - com.yahoo.datasketches sketches-core ${sketches.version} - com.amazonaws aws-java-sdk-bom @@ -1155,7 +1065,6 @@ flexible messaging model and an intuitive client API. 
pom import - org.apache.distributedlog distributedlog-core @@ -1168,13 +1077,11 @@ flexible messaging model and an intuitive client API. - org.apache.commons commons-collections4 ${commons.collections4.version} - com.lmax @@ -1198,103 +1105,86 @@ flexible messaging model and an intuitive client API. assertj-core ${assertj-core.version} - org.projectlombok lombok ${lombok.version} - javax.annotation javax.annotation-api ${javax.annotation-api.version} - javax.xml.bind jaxb-api ${jaxb-api} - jakarta.xml.bind jakarta.xml.bind-api ${jakarta.xml.bind.version} - com.sun.activation javax.activation ${javax.activation.version} - com.sun.activation jakarta.activation ${jakarta.activation.version} - jakarta.activation jakarta.activation-api ${jakarta.activation.version} - jakarta.validation jakarta.validation-api ${jakarta.validation.version} - io.opencensus opencensus-api ${opencensus.version} - io.opencensus opencensus-contrib-http-util ${opencensus.version} - io.opencensus opencensus-contrib-grpc-metrics ${opencensus.version} - org.opensearch.client opensearch-rest-high-level-client ${opensearch.version} - co.elastic.clients elasticsearch-java ${elasticsearch-java.version} - joda-time joda-time ${joda.version} - org.javassist javassist ${javassist.version} - net.jcip jcip-annotations ${jcip.version} - io.airlift aircompressor @@ -1306,25 +1196,21 @@ flexible messaging model and an intuitive client API. - org.objenesis objenesis ${objenesis.version} - org.apache.httpcomponents httpclient ${apache-http-client.version} - org.apache.httpcomponents httpcore ${apache-httpcomponents.version} - com.github.spotbugs spotbugs-annotations @@ -1332,31 +1218,26 @@ flexible messaging model and an intuitive client API. 
provided true - com.google.errorprone error_prone_annotations ${errorprone.version} - com.google.j2objc j2objc-annotations ${j2objc-annotations.version} - org.yaml snakeyaml ${snakeyaml.version} - org.apache.ant ant ${ant.version} - com.squareup.okhttp3 okhttp @@ -1377,7 +1258,6 @@ flexible messaging model and an intuitive client API. okio ${okio.version} - org.jetbrains.kotlin kotlin-stdlib @@ -1388,25 +1268,21 @@ flexible messaging model and an intuitive client API. kotlin-stdlib-common ${kotlin-stdlib.version} - org.jetbrains.kotlin kotlin-stdlib-jdk8 ${kotlin-stdlib.version} - com.github.luben zstd-jni ${zstd-jni.version} - - com.typesafe.netty - netty-reactive-streams - ${netty-reactive-streams.version} + com.typesafe.netty + netty-reactive-streams + ${netty-reactive-streams.version} - org.roaringbitmap RoaringBitmap @@ -1419,47 +1295,40 @@ flexible messaging model and an intuitive client API. - - org.apache.pulsar + io.streamnative buildtools ${project.version} test - org.testng testng test - org.mockito mockito-core test - org.mockito mockito-inline test - com.github.stefanbirkner system-lambda ${system-lambda.version} test - org.assertj assertj-core test - org.projectlombok lombok @@ -1470,7 +1339,6 @@ flexible messaging model and an intuitive client API. javax.annotation-api provided - org.apache.bookkeeper @@ -1480,8 +1348,8 @@ flexible messaging model and an intuitive client API. tests - org.bouncycastle - * + org.bouncycastle + * org.slf4j @@ -1501,8 +1369,26 @@ flexible messaging model and an intuitive client API. + + + org.apache.bookkeeper + bookkeeper-common + ${bookkeeper.version} + test + tests + + + com.fasterxml.jackson.core + * + + + + + org.apache.logging.log4j + log4j-layout-template-json + 2.20.0 + - ${project.artifactId} @@ -1577,7 +1463,6 @@ flexible messaging model and an intuitive client API. - org.commonjava.maven.plugins directory-maven-plugin @@ -1592,14 +1477,13 @@ flexible messaging model and an intuitive client API. 
pulsar.basedir - org.apache.pulsar + io.streamnative pulsar - pl.project13.maven git-commit-id-plugin @@ -1624,7 +1508,6 @@ flexible messaging model and an intuitive client API. - com.mycila license-maven-plugin @@ -1663,7 +1546,7 @@ flexible messaging model and an intuitive client API. **/ByteBufCodedOutputStream.java **/ahc.properties bin/proto/* - conf/schema_example.conf + conf/schema_example.json data/** logs/** **/circe/** @@ -1725,33 +1608,26 @@ flexible messaging model and an intuitive client API. src/assemble/README.bin.txt src/assemble/LICENSE.bin.txt src/assemble/NOTICE.bin.txt - src/main/java/org/apache/bookkeeper/mledger/proto/MLDataFormats.java src/main/java/org/apache/pulsar/broker/service/schema/proto/SchemaRegistryFormat.java bin/proto/MLDataFormats_pb2.py - **/avro/generated/*.java - **/*.avsc - src/main/java/org/apache/pulsar/io/kinesis/fbs/CompressionType.java src/main/java/org/apache/pulsar/io/kinesis/fbs/EncryptionCtx.java src/main/java/org/apache/pulsar/io/kinesis/fbs/EncryptionKey.java src/main/java/org/apache/pulsar/io/kinesis/fbs/KeyValue.java src/main/java/org/apache/pulsar/io/kinesis/fbs/Message.java - src/main/java/org/apache/bookkeeper/mledger/util/AbstractCASReferenceCounted.java - dependency-reduced-pom.xml - pulsar-client-go/go.mod pulsar-client-go/go.sum @@ -1759,15 +1635,12 @@ flexible messaging model and an intuitive client API. pulsar-function-go/go.sum pulsar-function-go/examples/go.mod pulsar-function-go/examples/go.sum - **/META-INF/services/com.scurrilous.circe.HashProvider **/META-INF/services/io.trino.spi.Plugin - **/django/stats/migrations/*.py **/conf/uwsgi_params - **/*.crt **/*.key @@ -1780,21 +1653,16 @@ flexible messaging model and an intuitive client API. 
certificate-authority/index.txt certificate-authority/serial certificate-authority/README.md - **/zk-3.5-test-data/* - **/requirements.txt - - conf/schema_example.conf + conf/schema_example.json **/templates/*.tpl - **/.helmignore **/_helpers.tpl - **/*.md .github/** @@ -1823,6 +1691,7 @@ flexible messaging model and an intuitive client API. **/*.so.* **/*.dylib src/test/resources/*.txt + **/dependency-reduced-pom.xml @@ -1831,55 +1700,106 @@ flexible messaging model and an intuitive client API. maven-enforcer-plugin ${maven-enforcer-plugin.version} - - enforce-maven - - enforce - - - - - 17 - Java 17+ is required to build Pulsar. - - - 3.6.1 - - - - + + enforce-maven + + enforce + + + + + 17 + Java 17+ is required to build Pulsar. + + + 3.6.1 + + + + - org.apache.maven.plugins - maven-assembly-plugin - ${maven-assembly-plugin.version} - false - - - source-release-assembly-tar-gz - generate-sources - - single - - - ${skipSourceReleaseAssembly} - true - - src/assembly-source-package.xml - - apache-pulsar-${project.version}-src - false - - tar.gz - - posix - - - + org.apache.maven.plugins + maven-assembly-plugin + ${maven-assembly-plugin.version} + false + + + source-release-assembly-tar-gz + generate-sources + + single + + + ${skipSourceReleaseAssembly} + true + + src/assembly-source-package.xml + + apache-pulsar-${project.version}-src + false + + tar.gz + + posix + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.13 + true + + ossrh + https://s01.oss.sonatype.org/ + 60285aee9d4161 + true + + + + org.apache.maven.plugins + maven-source-plugin + 3.2.0 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + + attach-javadocs + + jar + + + none + false + + + + + + org.apache.maven.plugins + maven-release-plugin + 3.0.0-M1 + + true + false + release + deploy + - @@ -1900,10 +1820,10 @@ flexible messaging model and an intuitive client API. 
maven-surefire-plugin - ${include} + ${include} - **/*$*,${exclude} + **/*$*,${exclude} ${groups} ${excludedGroups} @@ -1987,13 +1907,13 @@ flexible messaging model and an intuitive client API. com.github.spotbugs spotbugs-maven-plugin ${spotbugs-maven-plugin.version} - - - com.github.spotbugs - spotbugs - ${spotbugs.version} - - + + + com.github.spotbugs + spotbugs + ${spotbugs.version} + + org.codehaus.mojo @@ -2005,6 +1925,16 @@ flexible messaging model and an intuitive client API. docker-maven-plugin ${docker-maven.version} + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.13 + + + org.apache.maven.plugins + maven-source-plugin + 3.2.0 + @@ -2020,7 +1950,6 @@ flexible messaging model and an intuitive client API. - @@ -2129,7 +2058,6 @@ flexible messaging model and an intuitive client API. tests - contrib-check - - - - org.apache.rat - apache-rat-plugin - - - - check - - verify - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - check-style - verify - - ${pulsar.basedir}/buildtools/src/main/resources/pulsar/checkstyle.xml - ${pulsar.basedir}/buildtools/src/main/resources/pulsar/suppressions.xml - UTF-8 - **/proto/* - - - check - - - - - - + + + + org.apache.rat + apache-rat-plugin + + + + check + + verify + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + check-style + verify + + ${pulsar.basedir}/buildtools/src/main/resources/pulsar/checkstyle.xml + ${pulsar.basedir}/buildtools/src/main/resources/pulsar/suppressions.xml + UTF-8 + **/proto/* + + + check + + + + + + - windows @@ -2185,7 +2112,6 @@ flexible messaging model and an intuitive client API. rename-netty-native-libs.cmd - @@ -2230,37 +2156,27 @@ flexible messaging model and an intuitive client API. 
pulsar-broker-auth-sasl pulsar-client-auth-sasl pulsar-config-validation - structured-event-log - pulsar-transaction - pulsar-functions - pulsar-io - bouncy-castle - pulsar-client-messagecrypto-bc - pulsar-metadata jclouds-shaded - pulsar-package-management - distribution docker tests - core-modules @@ -2290,31 +2206,23 @@ flexible messaging model and an intuitive client API. pulsar-broker-auth-sasl pulsar-client-auth-sasl pulsar-config-validation - pulsar-transaction - pulsar-functions - pulsar-io - bouncy-castle - pulsar-client-messagecrypto-bc - distribution pulsar-metadata - pulsar-package-management - - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-broker-auth-athenz jar Athenz authentication plugin for broker - - ${project.groupId} pulsar-broker ${project.version} - ${project.groupId} testmocks ${project.version} test - com.yahoo.athenz athenz-zpe-java-client - org.bouncycastle bcpkix-jdk18on - - @@ -79,7 +69,6 @@ - com.github.spotbugs spotbugs-maven-plugin @@ -97,7 +86,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -111,7 +99,6 @@ - diff --git a/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml b/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml +++ b/pulsar-broker-auth-athenz/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-broker-auth-oidc/pom.xml b/pulsar-broker-auth-oidc/pom.xml index 157d8d0815d4f..e898923dfd659 100644 --- a/pulsar-broker-auth-oidc/pom.xml +++ b/pulsar-broker-auth-oidc/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-broker-auth-oidc jar Open ID Connect authentication plugin for broker - 0.11.5 - - ${project.groupId} pulsar-broker-common @@ -50,29 +44,24 @@ - com.auth0 java-jwt 4.3.0 - com.auth0 jwks-rsa 0.22.0 - 
com.github.ben-manes.caffeine caffeine - org.asynchttpclient async-http-client - io.kubernetes client-java @@ -97,7 +86,6 @@ - io.jsonwebtoken jjwt-api @@ -110,16 +98,13 @@ ${jsonwebtoken.version} test - com.github.tomakehurst wiremock-jre8 ${wiremock.version} test - - @@ -141,8 +126,6 @@ - - @@ -162,7 +145,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-broker-auth-sasl/pom.xml b/pulsar-broker-auth-sasl/pom.xml index 2957159ca93e8..cd06ee0f6f16e 100644 --- a/pulsar-broker-auth-sasl/pom.xml +++ b/pulsar-broker-auth-sasl/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-broker-auth-sasl jar SASL authentication plugin for broker - - ${project.groupId} pulsar-broker ${project.version} - org.apache.kerby kerby-config @@ -53,7 +47,6 @@ - org.apache.kerby kerb-simplekdc @@ -66,30 +59,25 @@ - ${project.groupId} pulsar-proxy ${project.version} test - ${project.groupId} testmocks ${project.version} test - ${project.groupId} pulsar-client-auth-sasl ${project.version} test - - @@ -111,8 +99,6 @@ - - @@ -132,7 +118,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java b/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java index c979566e485ce..82f760e14b7d6 100644 --- a/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java +++ b/pulsar-broker-auth-sasl/src/main/java/org/apache/pulsar/broker/authentication/SaslRoleTokenSigner.java @@ -76,7 +76,7 @@ public String verifyAndExtract(String signedStr) throws AuthenticationException String originalSignature = signedStr.substring(index + SIGNATURE.length()); String rawValue = signedStr.substring(0, index); String currentSignature = computeSignature(rawValue); - if (!originalSignature.equals(currentSignature)) { + if 
(!MessageDigest.isEqual(originalSignature.getBytes(), currentSignature.getBytes())){ throw new AuthenticationException("Invalid signature"); } return rawValue; diff --git a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java index 261efe680f862..f0e45aa734afb 100644 --- a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java +++ b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java @@ -49,6 +49,7 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.impl.auth.AuthenticationSasl; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.proxy.server.ProxyConfiguration; import org.apache.pulsar.proxy.server.ProxyService; import org.slf4j.Logger; @@ -193,15 +194,17 @@ protected void setup() throws Exception { conf.setAuthenticationProviders(providers); conf.setClusterName("test"); conf.setSuperUserRoles(ImmutableSet.of("client/" + localHostname + "@" + kdc.getRealm())); - - super.init(); - - lookupUrl = new URI(pulsar.getBrokerServiceUrl()); - // set admin auth, to verify admin web resources Map clientSaslConfig = new HashMap<>(); clientSaslConfig.put("saslJaasClientSectionName", "PulsarClient"); clientSaslConfig.put("serverType", "broker"); + conf.setBrokerClientAuthenticationPlugin(AuthenticationSasl.class.getName()); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory + .getMapper().getObjectMapper().writeValueAsString(clientSaslConfig)); + + super.init(); + + lookupUrl = new URI(pulsar.getBrokerServiceUrl()); log.info("set client jaas section name: PulsarClient"); admin = PulsarAdmin.builder() .serviceHttpUrl(brokerUrl.toString()) diff --git 
a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java index 5cace2221dea8..230c2ad787de4 100644 --- a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java +++ b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/SaslAuthenticateTest.java @@ -58,6 +58,7 @@ import org.apache.pulsar.client.impl.auth.AuthenticationSasl; import org.apache.pulsar.common.api.AuthData; import org.apache.pulsar.common.sasl.SaslConstants; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; @@ -186,7 +187,12 @@ protected void setup() throws Exception { conf.setAuthenticationProviders(providers); conf.setClusterName("test"); conf.setSuperUserRoles(ImmutableSet.of("client" + "@" + kdc.getRealm())); - + Map clientSaslConfig = new HashMap<>(); + clientSaslConfig.put("saslJaasClientSectionName", "PulsarClient"); + clientSaslConfig.put("serverType", "broker"); + conf.setBrokerClientAuthenticationPlugin(AuthenticationSasl.class.getName()); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory + .getMapper().getObjectMapper().writeValueAsString(clientSaslConfig)); super.init(); lookupUrl = new URI(pulsar.getWebServiceAddress()); @@ -197,9 +203,6 @@ protected void setup() throws Exception { .authentication(authSasl)); // set admin auth, to verify admin web resources - Map clientSaslConfig = new HashMap<>(); - clientSaslConfig.put("saslJaasClientSectionName", "PulsarClient"); - clientSaslConfig.put("serverType", "broker"); log.info("set client jaas section name: PulsarClient"); admin = PulsarAdmin.builder() .serviceHttpUrl(brokerUrl.toString()) diff --git a/pulsar-broker-common/pom.xml b/pulsar-broker-common/pom.xml index 
b81f2b7621ba7..a6d0e21a0ca59 100644 --- a/pulsar-broker-common/pom.xml +++ b/pulsar-broker-common/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-broker-common Common classes used in multiple broker modules - ${project.groupId} pulsar-metadata ${project.version} - com.google.guava guava - io.prometheus simpleclient_jetty - javax.servlet javax.servlet-api - javax.ws.rs javax.ws.rs-api - io.jsonwebtoken jjwt-impl - io.jsonwebtoken jjwt-jackson - org.bouncycastle @@ -76,14 +65,12 @@ ${bouncycastle.bc-fips.version} test - org.awaitility awaitility test - @@ -103,7 +90,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -117,7 +103,6 @@ - maven-resources-plugin diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java index e9e350800b44e..983822f22941b 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMapping.java @@ -121,8 +121,6 @@ public synchronized void setConf(Configuration conf) { store.registerListener(this::handleUpdates); racksWithHost = bookieMappingCache.get(BOOKIE_INFO_ROOT_PATH).get() .orElseGet(BookiesRackConfiguration::new); - updateRacksWithHost(racksWithHost); - watchAvailableBookies(); for (Map bookieMapping : racksWithHost.values()) { for (String address : bookieMapping.keySet()) { bookieAddressListLastTime.add(BookieId.parse(address)); @@ -132,6 +130,8 @@ public synchronized void setConf(Configuration conf) { bookieAddressListLastTime); } } + updateRacksWithHost(racksWithHost); + watchAvailableBookies(); } catch (InterruptedException | ExecutionException | MetadataException e) { throw new RuntimeException(METADATA_STORE_INSTANCE + " failed to init BookieId 
list"); } @@ -245,6 +245,7 @@ private void handleUpdates(Notification n) { bookieMappingCache.get(BOOKIE_INFO_ROOT_PATH) .thenAccept(optVal -> { + Set bookieIdSet = new HashSet<>(); synchronized (this) { LOG.info("Bookie rack info updated to {}. Notifying rackaware policy.", optVal); this.updateRacksWithHost(optVal.orElseGet(BookiesRackConfiguration::new)); @@ -259,12 +260,12 @@ private void handleUpdates(Notification n) { LOG.debug("Bookies with rack update from {} to {}", bookieAddressListLastTime, bookieAddressList); } - Set bookieIdSet = new HashSet<>(bookieAddressList); + bookieIdSet.addAll(bookieAddressList); bookieIdSet.addAll(bookieAddressListLastTime); bookieAddressListLastTime = bookieAddressList; - if (rackawarePolicy != null) { - rackawarePolicy.onBookieRackChange(new ArrayList<>(bookieIdSet)); - } + } + if (rackawarePolicy != null) { + rackawarePolicy.onBookieRackChange(new ArrayList<>(bookieIdSet)); } }); } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java index 2594798485a20..02ddea9487469 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicy.java @@ -19,15 +19,15 @@ package org.apache.pulsar.bookie.rackawareness; import static org.apache.pulsar.bookie.rackawareness.BookieRackAffinityMapping.METADATA_STORE_INSTANCE; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Sets; import io.netty.util.HashedWheelTimer; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; -import 
java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException; import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; @@ -61,6 +61,7 @@ public class IsolatedBookieEnsemblePlacementPolicy extends RackawareEnsemblePlac private static final String PULSAR_SYSTEM_TOPIC_ISOLATION_GROUP = "*"; + private volatile BookiesRackConfiguration cachedRackConfiguration = null; public IsolatedBookieEnsemblePlacementPolicy() { super(); @@ -86,7 +87,12 @@ public RackawareEnsemblePlacementPolicyImpl initialize(ClientConfiguration conf, } // Only add the bookieMappingCache if we have defined an isolation group bookieMappingCache = store.getMetadataCache(BookiesRackConfiguration.class); - bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH).join(); + bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH).thenAccept(opt -> opt.ifPresent( + bookiesRackConfiguration -> cachedRackConfiguration = bookiesRackConfiguration)) + .exceptionally(e -> { + log.warn("Failed to load bookies rack configuration while initialize the PlacementPolicy."); + return null; + }); } if (conf.getProperty(SECONDARY_ISOLATION_BOOKIE_GROUPS) != null) { String secondaryIsolationGroupsString = ConfigurationStringUtil @@ -166,12 +172,12 @@ private static Pair, Set> getIsolationGroup( String secondaryIsolationGroupString = ConfigurationStringUtil .castToString(properties.getOrDefault(SECONDARY_ISOLATION_BOOKIE_GROUPS, "")); if (!primaryIsolationGroupString.isEmpty()) { - pair.setLeft(new HashSet<>(Arrays.asList(primaryIsolationGroupString.split(",")))); + pair.setLeft(Sets.newHashSet(primaryIsolationGroupString.split(","))); } else { pair.setLeft(Collections.emptySet()); } if (!secondaryIsolationGroupString.isEmpty()) { - pair.setRight(new HashSet<>(Arrays.asList(secondaryIsolationGroupString.split(",")))); + pair.setRight(Sets.newHashSet(secondaryIsolationGroupString.split(","))); } 
else { pair.setRight(Collections.emptySet()); } @@ -179,25 +185,26 @@ private static Pair, Set> getIsolationGroup( return pair; } - private Set getExcludedBookiesWithIsolationGroups(int ensembleSize, + @VisibleForTesting + Set getExcludedBookiesWithIsolationGroups(int ensembleSize, Pair, Set> isolationGroups) { Set excludedBookies = new HashSet<>(); - if (isolationGroups != null && isolationGroups.getLeft().contains(PULSAR_SYSTEM_TOPIC_ISOLATION_GROUP)) { + if (isolationGroups != null && isolationGroups.getLeft().contains(PULSAR_SYSTEM_TOPIC_ISOLATION_GROUP)) { return excludedBookies; } try { if (bookieMappingCache != null) { - CompletableFuture> future = - bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH); + bookieMappingCache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH) + .thenAccept(opt -> cachedRackConfiguration = opt.orElse(null)).exceptionally(e -> { + log.warn("Failed to update the newest bookies rack config."); + return null; + }); - Optional optRes = (future.isDone() && !future.isCompletedExceptionally()) - ? 
future.join() : Optional.empty(); - - if (optRes.isEmpty()) { + BookiesRackConfiguration allGroupsBookieMapping = cachedRackConfiguration; + if (allGroupsBookieMapping == null) { + log.debug("The bookies rack config is not available at now."); return excludedBookies; } - - BookiesRackConfiguration allGroupsBookieMapping = optRes.get(); Set allBookies = allGroupsBookieMapping.keySet(); int totalAvailableBookiesInPrimaryGroup = 0; Set primaryIsolationGroup = Collections.emptySet(); diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java index 3d73734e82767..6175835826e5d 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java @@ -2425,9 +2425,10 @@ The delayed message index time step(in seconds) in per bucket snapshot segment, @FieldContext( dynamic = true, category = CATEGORY_LOAD_BALANCER, - doc = "Direct Memory Resource Usage Weight" + doc = "Direct Memory Resource Usage Weight. Direct memory usage cannot accurately reflect the " + + "machine's load, and it is not recommended to use it to score the machine's load." ) - private double loadBalancerDirectMemoryResourceWeight = 1.0; + private double loadBalancerDirectMemoryResourceWeight = 0; @FieldContext( dynamic = true, @@ -2776,6 +2777,12 @@ The delayed message index time step(in seconds) in per bucket snapshot segment, ) private long brokerServiceCompactionPhaseOneLoopTimeInSeconds = 30; + @FieldContext( + category = CATEGORY_SERVER, + doc = "Whether retain null-key message during topic compaction." 
+ ) + private boolean topicCompactionRetainNullKey = true; + @FieldContext( category = CATEGORY_SERVER, doc = "Interval between checks to see if cluster is migrated and marks topic migrated " diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java index 69ef526012daa..9a7324a6d077a 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java @@ -71,4 +71,8 @@ public boolean hasSubscription() { public String getSubscription() { return subscription; } + + public AuthenticationDataSource getAuthData() { + return authData; + } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java index fa613245cfa27..fdab233a51098 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java @@ -35,6 +35,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationDataSubscription; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; @@ -89,15 +90,18 @@ public void initialize(ServiceConfiguration conf, PulsarResources pulsarResource @Override public CompletableFuture 
isSuperUser(String role, AuthenticationDataSource authenticationData, ServiceConfiguration serviceConfiguration) { - Set roles = getRoles(authenticationData); - if (roles.isEmpty()) { - return CompletableFuture.completedFuture(false); - } + // if superUser role contains in config, return true. Set superUserRoles = serviceConfiguration.getSuperUserRoles(); if (superUserRoles.isEmpty()) { return CompletableFuture.completedFuture(false); } - + if (role != null && superUserRoles.contains(role)) { + return CompletableFuture.completedFuture(true); + } + Set roles = getRoles(role, authenticationData); + if (roles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } return CompletableFuture.completedFuture(roles.stream().anyMatch(superUserRoles::contains)); } @@ -109,7 +113,7 @@ public CompletableFuture validateTenantAdminAccess(String tenantName, S if (isSuperUser) { return CompletableFuture.completedFuture(true); } - Set roles = getRoles(authData); + Set roles = getRoles(role, authData); if (roles.isEmpty()) { return CompletableFuture.completedFuture(false); } @@ -140,7 +144,12 @@ public CompletableFuture validateTenantAdminAccess(String tenantName, S }); } - private Set getRoles(AuthenticationDataSource authData) { + private Set getRoles(String role, AuthenticationDataSource authData) { + if (authData == null || (authData instanceof AuthenticationDataSubscription + && ((AuthenticationDataSubscription) authData).getAuthData() == null)) { + return Collections.singleton(role); + } + String token = null; if (authData.hasDataFromCommand()) { @@ -174,7 +183,14 @@ private Set getRoles(AuthenticationDataSource authData) { Jwt jwt = parser.parseClaimsJwt(unsignedToken); try { - return new HashSet<>(Collections.singletonList(jwt.getBody().get(roleClaim, String.class))); + final String jwtRole = jwt.getBody().get(roleClaim, String.class); + if (jwtRole == null) { + if (log.isDebugEnabled()) { + log.debug("Do not have corresponding claim in jwt token. 
claim={}", roleClaim); + } + return Collections.emptySet(); + } + return new HashSet<>(Collections.singletonList(jwtRole)); } catch (RequiredTypeException requiredTypeException) { try { List list = jwt.getBody().get(roleClaim, List.class); @@ -189,15 +205,21 @@ private Set getRoles(AuthenticationDataSource authData) { return Collections.emptySet(); } - public CompletableFuture authorize(AuthenticationDataSource authenticationData, Function> authorizeFunc) { - Set roles = getRoles(authenticationData); - if (roles.isEmpty()) { - return CompletableFuture.completedFuture(false); - } - List> futures = new ArrayList<>(roles.size()); - roles.forEach(r -> futures.add(authorizeFunc.apply(r))); - return FutureUtil.waitForAny(futures, ret -> (boolean) ret).thenApply(v -> v.isPresent()); + public CompletableFuture authorize(String role, AuthenticationDataSource authenticationData, + Function> authorizeFunc) { + return isSuperUser(role, authenticationData, conf) + .thenCompose(superUser -> { + if (superUser) { + return CompletableFuture.completedFuture(true); + } + Set roles = getRoles(role, authenticationData); + if (roles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } + List> futures = new ArrayList<>(roles.size()); + roles.forEach(r -> futures.add(authorizeFunc.apply(r))); + return FutureUtil.waitForAny(futures, ret -> (boolean) ret).thenApply(v -> v.isPresent()); + }); } /** @@ -209,7 +231,7 @@ public CompletableFuture authorize(AuthenticationDataSource authenticat @Override public CompletableFuture canProduceAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.canProduceAsync(topicName, r, authenticationData)); + return authorize(role, authenticationData, r -> super.canProduceAsync(topicName, r, authenticationData)); } /** @@ -224,7 +246,7 @@ public CompletableFuture canProduceAsync(TopicName topicName, String ro public CompletableFuture canConsumeAsync(TopicName 
topicName, String role, AuthenticationDataSource authenticationData, String subscription) { - return authorize(authenticationData, r -> super.canConsumeAsync(topicName, r, authenticationData, + return authorize(role, authenticationData, r -> super.canConsumeAsync(topicName, r, authenticationData, subscription)); } @@ -241,25 +263,27 @@ public CompletableFuture canConsumeAsync(TopicName topicName, String ro @Override public CompletableFuture canLookupAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.canLookupAsync(topicName, r, authenticationData)); + return authorize(role, authenticationData, r -> super.canLookupAsync(topicName, r, authenticationData)); } @Override public CompletableFuture allowFunctionOpsAsync(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.allowFunctionOpsAsync(namespaceName, r, authenticationData)); + return authorize(role, authenticationData, + r -> super.allowFunctionOpsAsync(namespaceName, r, authenticationData)); } @Override public CompletableFuture allowSourceOpsAsync(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.allowSourceOpsAsync(namespaceName, r, authenticationData)); + return authorize(role, authenticationData, + r -> super.allowSourceOpsAsync(namespaceName, r, authenticationData)); } @Override public CompletableFuture allowSinkOpsAsync(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) { - return authorize(authenticationData, r -> super.allowSinkOpsAsync(namespaceName, r, authenticationData)); + return authorize(role, authenticationData, r -> super.allowSinkOpsAsync(namespaceName, r, authenticationData)); } @Override @@ -267,7 +291,7 @@ public CompletableFuture allowTenantOperationAsync(String tenantName, String role, TenantOperation 
operation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowTenantOperationAsync(tenantName, r, operation, authData)); + return authorize(role, authData, r -> super.allowTenantOperationAsync(tenantName, r, operation, authData)); } @Override @@ -275,7 +299,8 @@ public CompletableFuture allowNamespaceOperationAsync(NamespaceName nam String role, NamespaceOperation operation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowNamespaceOperationAsync(namespaceName, r, operation, authData)); + return authorize(role, authData, + r -> super.allowNamespaceOperationAsync(namespaceName, r, operation, authData)); } @Override @@ -284,8 +309,8 @@ public CompletableFuture allowNamespacePolicyOperationAsync(NamespaceNa PolicyOperation operation, String role, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowNamespacePolicyOperationAsync(namespaceName, policy, operation, r, - authData)); + return authorize(role, authData, + r -> super.allowNamespacePolicyOperationAsync(namespaceName, policy, operation, r, authData)); } @Override @@ -293,7 +318,7 @@ public CompletableFuture allowTopicOperationAsync(TopicName topicName, String role, TopicOperation operation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowTopicOperationAsync(topicName, r, operation, authData)); + return authorize(role, authData, r -> super.allowTopicOperationAsync(topicName, r, operation, authData)); } @Override @@ -302,7 +327,7 @@ public CompletableFuture allowTopicPolicyOperationAsync(TopicName topic PolicyName policyName, PolicyOperation policyOperation, AuthenticationDataSource authData) { - return authorize(authData, r -> super.allowTopicPolicyOperationAsync(topicName, r, policyName, policyOperation, - authData)); + return authorize(role, authData, + r -> super.allowTopicPolicyOperationAsync(topicName, r, policyName, policyOperation, authData)); } } diff --git 
a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java index ece22fe223b97..acb6fce9b92e4 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java @@ -357,6 +357,9 @@ private CompletableFuture updateSubscriptionPermissionAsync(NamespaceName policies.auth_policies.getSubscriptionAuthentication().get(subscriptionName); if (subscriptionAuth != null) { subscriptionAuth.removeAll(roles); + if (subscriptionAuth.isEmpty()) { + policies.auth_policies.getSubscriptionAuthentication().remove(subscriptionName); + } } else { log.info("[{}] Couldn't find role {} while revoking for sub = {}", namespace, roles, subscriptionName); diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java index f70d3f31d6e2f..b5ccc9a5a9077 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java @@ -49,16 +49,18 @@ public class NamespaceResources extends BaseResources { private final IsolationPolicyResources isolationPolicies; private final PartitionedTopicResources partitionedTopicResources; private final MetadataStore configurationStore; + private final MetadataStore localStore; public static final String POLICIES_READONLY_FLAG_PATH = "/admin/flags/policies-readonly"; private static final String NAMESPACE_BASE_PATH = "/namespace"; private static final String BUNDLE_DATA_BASE_PATH = "/loadbalance/bundle-data"; - public NamespaceResources(MetadataStore configurationStore, int 
operationTimeoutSec) { + public NamespaceResources(MetadataStore localStore, MetadataStore configurationStore, int operationTimeoutSec) { super(configurationStore, Policies.class, operationTimeoutSec); this.configurationStore = configurationStore; isolationPolicies = new IsolationPolicyResources(configurationStore, operationTimeoutSec); partitionedTopicResources = new PartitionedTopicResources(configurationStore, operationTimeoutSec); + this.localStore = localStore; } public CompletableFuture> listNamespacesAsync(String tenant) { @@ -381,13 +383,13 @@ public CompletableFuture runWithMarkDeleteAsync(TopicName topic, // clear resource of `/loadbalance/bundle-data/{tenant}/{namespace}/` in metadata-store public CompletableFuture deleteBundleDataAsync(NamespaceName ns) { final String namespaceBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, ns.toString()); - return getStore().deleteRecursive(namespaceBundlePath); + return this.localStore.deleteRecursive(namespaceBundlePath); } // clear resource of `/loadbalance/bundle-data/{tenant}/` in metadata-store public CompletableFuture deleteBundleDataTenantAsync(String tenant) { final String tenantBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, tenant); - return getStore().deleteRecursive(tenantBundlePath); + return this.localStore.deleteRecursive(tenantBundlePath); } } \ No newline at end of file diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java index dfcd0a4194ff5..a3c5633a6dbe8 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java @@ -60,7 +60,8 @@ public PulsarResources(MetadataStore localMetadataStore, MetadataStore configura if (configurationMetadataStore != null) { tenantResources = new TenantResources(configurationMetadataStore, 
operationTimeoutSec); clusterResources = new ClusterResources(configurationMetadataStore, operationTimeoutSec); - namespaceResources = new NamespaceResources(configurationMetadataStore, operationTimeoutSec); + namespaceResources = new NamespaceResources(localMetadataStore, configurationMetadataStore + , operationTimeoutSec); resourcegroupResources = new ResourceGroupResources(configurationMetadataStore, operationTimeoutSec); } else { tenantResources = null; diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java index 840ced0a1c1c4..0963f25c3d31f 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TopicResources.java @@ -50,6 +50,9 @@ public TopicResources(MetadataStore store) { store.registerListener(this::handleNotification); } + /*** + * List persistent topics names under a namespace, the topic name contains the partition suffix. 
+ */ public CompletableFuture> listPersistentTopicsAsync(NamespaceName ns) { String path = MANAGED_LEDGER_PATH + "/" + ns + "/persistent"; diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java index 0046d28afa444..f6fc42ff0fccf 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java @@ -79,12 +79,16 @@ public static AdditionalServlets load(PulsarConfiguration conf) throws IOExcepti return null; } + String[] additionalServletsList = additionalServlets.split(","); + if (additionalServletsList.length == 0) { + return null; + } + AdditionalServletDefinitions definitions = AdditionalServletUtils.searchForServlets(additionalServletDirectory , narExtractionDirectory); ImmutableMap.Builder builder = ImmutableMap.builder(); - String[] additionalServletsList = additionalServlets.split(","); for (String servletName : additionalServletsList) { AdditionalServletMetadata definition = definitions.servlets().get(servletName); if (null == definition) { @@ -106,7 +110,7 @@ public static AdditionalServlets load(PulsarConfiguration conf) throws IOExcepti } Map servlets = builder.build(); - if (servlets != null && !servlets.isEmpty()) { + if (!servlets.isEmpty()) { return new AdditionalServlets(servlets); } diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java index d7be7dabd0db1..9cd8160444249 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java +++ 
b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/BookieRackAffinityMappingTest.java @@ -21,6 +21,7 @@ import static org.apache.bookkeeper.feature.SettableFeatureProvider.DISABLE_ALL; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -28,6 +29,7 @@ import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Method; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -35,7 +37,11 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import lombok.Cleanup; import org.apache.bookkeeper.client.DefaultBookieAddressResolver; import org.apache.bookkeeper.client.EnsemblePlacementPolicy; import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; @@ -46,6 +52,7 @@ import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieNode; import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.net.NetworkTopology; import org.apache.bookkeeper.proto.BookieAddressResolver; import org.apache.bookkeeper.stats.NullStatsLogger; import org.apache.bookkeeper.stats.StatsLogger; @@ -55,6 +62,8 @@ import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.MetadataStoreFactory; +import org.apache.pulsar.metadata.api.Notification; +import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.bookkeeper.BookieServiceInfoSerde; import 
org.apache.pulsar.metadata.bookkeeper.PulsarRegistrationClient; import org.awaitility.Awaitility; @@ -254,6 +263,7 @@ public void testWithPulsarRegistrationClient() throws Exception { bkClientConf.getTimeoutTimerNumTicks()); RackawareEnsemblePlacementPolicy repp = new RackawareEnsemblePlacementPolicy(); + mapping.registerRackChangeListener(repp); Class clazz1 = Class.forName("org.apache.bookkeeper.client.TopologyAwareEnsemblePlacementPolicy"); Field field1 = clazz1.getDeclaredField("knownBookies"); field1.setAccessible(true); @@ -323,6 +333,81 @@ public void testWithPulsarRegistrationClient() throws Exception { assertEquals(knownBookies.get(BOOKIE2.toBookieId()).getNetworkLocation(), "/rack1"); assertEquals(knownBookies.get(BOOKIE3.toBookieId()).getNetworkLocation(), "/default-rack"); + //remove bookie2 rack, the bookie2 rack should be /default-rack + data = "{\"group1\": {\"" + BOOKIE1 + + "\": {\"rack\": \"/rack0\", \"hostname\": \"bookie1.example.com\"}}}"; + store.put(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH, data.getBytes(), Optional.empty()).join(); + Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> ((BookiesRackConfiguration)field.get(mapping)).get("group1").size() == 1); + + racks = mapping + .resolve(Lists.newArrayList(BOOKIE1.getHostName(), BOOKIE2.getHostName(), BOOKIE3.getHostName())) + .stream().filter(Objects::nonNull).toList(); + assertEquals(racks.size(), 1); + assertEquals(racks.get(0), "/rack0"); + assertEquals(knownBookies.size(), 3); + assertEquals(knownBookies.get(BOOKIE1.toBookieId()).getNetworkLocation(), "/rack0"); + assertEquals(knownBookies.get(BOOKIE2.toBookieId()).getNetworkLocation(), "/default-rack"); + assertEquals(knownBookies.get(BOOKIE3.toBookieId()).getNetworkLocation(), "/default-rack"); + timer.stop(); } + + @Test + public void testNoDeadlockWithRackawarePolicy() throws Exception { + ClientConfiguration bkClientConf = new ClientConfiguration(); + 
bkClientConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store); + + BookieRackAffinityMapping mapping = new BookieRackAffinityMapping(); + mapping.setBookieAddressResolver(BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + mapping.setConf(bkClientConf); + + @Cleanup("stop") + HashedWheelTimer timer = new HashedWheelTimer(new ThreadFactoryBuilder().setNameFormat("TestTimer-%d").build(), + bkClientConf.getTimeoutTimerTickDurationMs(), TimeUnit.MILLISECONDS, + bkClientConf.getTimeoutTimerNumTicks()); + + RackawareEnsemblePlacementPolicy repp = new RackawareEnsemblePlacementPolicy(); + repp.initialize(bkClientConf, Optional.of(mapping), timer, + DISABLE_ALL, NullStatsLogger.INSTANCE, BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + repp.withDefaultRack(NetworkTopology.DEFAULT_REGION_AND_RACK); + + mapping.registerRackChangeListener(repp); + + @Cleanup("shutdownNow") + ExecutorService executor1 = Executors.newSingleThreadExecutor(); + @Cleanup("shutdownNow") + ExecutorService executor2 = Executors.newSingleThreadExecutor(); + + CountDownLatch count = new CountDownLatch(2); + + executor1.submit(() -> { + try { + Method handleUpdates = + BookieRackAffinityMapping.class.getDeclaredMethod("handleUpdates", Notification.class); + handleUpdates.setAccessible(true); + Notification n = + new Notification(NotificationType.Modified, BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH); + long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < 2_000) { + handleUpdates.invoke(mapping, n); + } + count.countDown(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + executor2.submit(() -> { + Set writableBookies = new HashSet<>(); + writableBookies.add(BOOKIE1.toBookieId()); + long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < 2_000) { + repp.onClusterChanged(writableBookies, Collections.emptySet()); + repp.onClusterChanged(Collections.emptySet(), Collections.emptySet()); + } + 
count.countDown(); + }); + + assertTrue(count.await(3, TimeUnit.SECONDS)); + } } diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java index f535ced08f731..beb00197e4e9a 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/bookie/rackawareness/IsolatedBookieEnsemblePlacementPolicyTest.java @@ -18,11 +18,14 @@ */ package org.apache.pulsar.bookie.rackawareness; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Sets; import io.netty.util.HashedWheelTimer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -34,18 +37,23 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException; import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.feature.SettableFeatureProvider; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieSocketAddress; import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.common.policies.data.BookieInfo; +import org.apache.pulsar.common.policies.data.BookiesRackConfiguration; import org.apache.pulsar.common.policies.data.EnsemblePlacementPolicyConfig; import org.apache.pulsar.common.util.ObjectMapperFactory; import 
org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.MetadataStoreFactory; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.cache.impl.MetadataCacheImpl; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -114,6 +122,113 @@ public void testNonRegionBookie() throws Exception { assertFalse(ensemble.contains(new BookieSocketAddress(BOOKIE4).toBookieId())); } + @Test + public void testMetadataStoreCases() throws Exception { + Map mainBookieGroup = new HashMap<>(); + mainBookieGroup.put(BOOKIE1, BookieInfo.builder().rack("rack0").build()); + mainBookieGroup.put(BOOKIE2, BookieInfo.builder().rack("rack1").build()); + mainBookieGroup.put(BOOKIE3, BookieInfo.builder().rack("rack1").build()); + mainBookieGroup.put(BOOKIE4, BookieInfo.builder().rack("rack0").build()); + + Map secondaryBookieGroup = new HashMap<>(); + + store = mock(MetadataStoreExtended.class); + MetadataCacheImpl cache = mock(MetadataCacheImpl.class); + when(store.getMetadataCache(BookiesRackConfiguration.class)).thenReturn(cache); + CompletableFuture> initialFuture = new CompletableFuture<>(); + //The initialFuture only has group1. + BookiesRackConfiguration rackConfiguration1 = new BookiesRackConfiguration(); + rackConfiguration1.put("group1", mainBookieGroup); + rackConfiguration1.put("group2", secondaryBookieGroup); + initialFuture.complete(Optional.of(rackConfiguration1)); + + long waitTime = 2000; + CompletableFuture> waitingCompleteFuture = new CompletableFuture<>(); + new Thread(() -> { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + //The waitingCompleteFuture has group1 and group2. 
+ BookiesRackConfiguration rackConfiguration2 = new BookiesRackConfiguration(); + Map mainBookieGroup2 = new HashMap<>(); + mainBookieGroup2.put(BOOKIE1, BookieInfo.builder().rack("rack0").build()); + mainBookieGroup2.put(BOOKIE2, BookieInfo.builder().rack("rack1").build()); + mainBookieGroup2.put(BOOKIE4, BookieInfo.builder().rack("rack0").build()); + + Map secondaryBookieGroup2 = new HashMap<>(); + secondaryBookieGroup2.put(BOOKIE3, BookieInfo.builder().rack("rack0").build()); + rackConfiguration2.put("group1", mainBookieGroup2); + rackConfiguration2.put("group2", secondaryBookieGroup2); + waitingCompleteFuture.complete(Optional.of(rackConfiguration2)); + }).start(); + + long longWaitTime = 4000; + CompletableFuture> emptyFuture = new CompletableFuture<>(); + new Thread(() -> { + try { + Thread.sleep(longWaitTime); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + //The emptyFuture means that the zk node /bookies already be removed. + emptyFuture.complete(Optional.empty()); + }).start(); + + //Return different future means that cache expire. 
+ when(cache.get(BookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH)) + .thenReturn(initialFuture).thenReturn(initialFuture) + .thenReturn(waitingCompleteFuture).thenReturn(waitingCompleteFuture) + .thenReturn(emptyFuture).thenReturn(emptyFuture); + + IsolatedBookieEnsemblePlacementPolicy isolationPolicy = new IsolatedBookieEnsemblePlacementPolicy(); + ClientConfiguration bkClientConf = new ClientConfiguration(); + bkClientConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store); + bkClientConf.setProperty(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, isolationGroups); + isolationPolicy.initialize(bkClientConf, Optional.empty(), timer, SettableFeatureProvider.DISABLE_ALL, NullStatsLogger.INSTANCE, BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + isolationPolicy.onClusterChanged(writableBookies, readOnlyBookies); + + MutablePair, Set> groups = new MutablePair<>(); + groups.setLeft(Sets.newHashSet("group1")); + groups.setRight(new HashSet<>()); + + //initialFuture, the future is waiting done. + Set blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertTrue(blacklist.isEmpty()); + + //waitingCompleteFuture, the future is waiting done. + blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertTrue(blacklist.isEmpty()); + + Thread.sleep(waitTime); + + //waitingCompleteFuture, the future is already done. + blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertFalse(blacklist.isEmpty()); + assertEquals(blacklist.size(), 1); + BookieId excludeBookie = blacklist.iterator().next(); + assertEquals(excludeBookie.toString(), BOOKIE3); + + //emptyFuture, the future is waiting done. 
+ blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertFalse(blacklist.isEmpty()); + assertEquals(blacklist.size(), 1); + excludeBookie = blacklist.iterator().next(); + assertEquals(excludeBookie.toString(), BOOKIE3); + + Thread.sleep(longWaitTime - waitTime); + + //emptyFuture, the future is already done. + blacklist = + isolationPolicy.getExcludedBookiesWithIsolationGroups(2, groups); + assertTrue(blacklist.isEmpty()); + } + @Test public void testBasic() throws Exception { Map> bookieMapping = new HashMap<>(); diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java index 7e329d14307c7..ed9626dffe23f 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java @@ -24,13 +24,16 @@ import io.jsonwebtoken.Jwts; import io.jsonwebtoken.SignatureAlgorithm; import java.util.Properties; +import java.util.function.Function; +import lombok.Cleanup; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationDataSubscription; import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; import org.apache.pulsar.broker.resources.PulsarResources; import org.testng.annotations.Test; - import javax.crypto.SecretKey; +import java.util.Set; import java.util.concurrent.CompletableFuture; public class MultiRolesTokenAuthorizationProviderTest { @@ -43,6 +46,8 @@ public void testMultiRolesAuthz() throws Exception { String token = Jwts.builder().claim("sub", new String[]{userA, userB}).signWith(secretKey).compact(); 
MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -60,18 +65,18 @@ public String getHttpHeader(String name) { } }; - assertTrue(provider.authorize(ads, role -> { + assertTrue(provider.authorize("test", ads, role -> { if (role.equals(userB)) { return CompletableFuture.completedFuture(true); // only userB has permission } return CompletableFuture.completedFuture(false); }).get()); - assertTrue(provider.authorize(ads, role -> { + assertTrue(provider.authorize("test", ads, role -> { return CompletableFuture.completedFuture(true); // all users has permission }).get()); - assertFalse(provider.authorize(ads, role -> { + assertFalse(provider.authorize("test", ads, role -> { return CompletableFuture.completedFuture(false); // all users has no permission }).get()); } @@ -82,6 +87,8 @@ public void testMultiRolesAuthzWithEmptyRoles() throws Exception { String token = Jwts.builder().claim("sub", new String[]{}).signWith(secretKey).compact(); MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -99,7 +106,7 @@ public String getHttpHeader(String name) { } }; - assertFalse(provider.authorize(ads, role -> CompletableFuture.completedFuture(false)).get()); + assertFalse(provider.authorize("test", ads, role -> CompletableFuture.completedFuture(false)).get()); } @Test @@ -109,6 +116,8 @@ public void testMultiRolesAuthzWithSingleRole() throws Exception { String token = Jwts.builder().claim("sub", testRole).signWith(secretKey).compact(); MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + 
ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -126,7 +135,7 @@ public String getHttpHeader(String name) { } }; - assertTrue(provider.authorize(ads, role -> { + assertTrue(provider.authorize("test", ads, role -> { if (role.equals(testRole)) { return CompletableFuture.completedFuture(true); } @@ -134,11 +143,66 @@ public String getHttpHeader(String name) { }).get()); } + @Test + public void testMultiRolesAuthzWithoutClaim() throws Exception { + final SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + final String testRole = "test-role"; + // broker will use "sub" as the claim by default. + final String token = Jwts.builder() + .claim("whatever", testRole).signWith(secretKey).compact(); + ServiceConfiguration conf = new ServiceConfiguration(); + final MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + provider.initialize(conf, mock(PulsarResources.class)); + final AuthenticationDataSource ads = new AuthenticationDataSource() { + @Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; + + assertFalse(provider.authorize("test", ads, role -> { + if (role == null) { + throw new IllegalStateException("We should avoid pass null to sub providers"); + } + return CompletableFuture.completedFuture(role.equals(testRole)); + }).get()); + } + + @Test + public void testMultiRolesAuthzWithAnonymousUser() throws Exception { + @Cleanup + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + + provider.initialize(conf, mock(PulsarResources.class)); + + 
Function> authorizeFunc = (String role) -> { + if (role.equals("test-role")) { + return CompletableFuture.completedFuture(true); + } + return CompletableFuture.completedFuture(false); + }; + assertTrue(provider.authorize("test-role", null, authorizeFunc).get()); + assertFalse(provider.authorize("test-role-x", null, authorizeFunc).get()); + assertTrue(provider.authorize("test-role", new AuthenticationDataSubscription(null, "test-sub"), authorizeFunc).get()); + } + @Test public void testMultiRolesNotFailNonJWT() throws Exception { String token = "a-non-jwt-token"; MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + ServiceConfiguration conf = new ServiceConfiguration(); + provider.initialize(conf, mock(PulsarResources.class)); AuthenticationDataSource ads = new AuthenticationDataSource() { @Override @@ -156,7 +220,7 @@ public String getHttpHeader(String name) { } }; - assertFalse(provider.authorize(ads, role -> CompletableFuture.completedFuture(false)).get()); + assertFalse(provider.authorize("test", ads, role -> CompletableFuture.completedFuture(false)).get()); } @Test @@ -191,11 +255,51 @@ public String getHttpHeader(String name) { } }; - assertTrue(provider.authorize(ads, role -> { + assertTrue(provider.authorize("test", ads, role -> { if (role.equals(testRole)) { return CompletableFuture.completedFuture(true); } return CompletableFuture.completedFuture(false); }).get()); } + + @Test + public void testMultiRolesAuthzWithSuperUser() throws Exception { + SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + String testAdminRole = "admin"; + String token = Jwts.builder().claim("sub", testAdminRole).signWith(secretKey).compact(); + + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setSuperUserRoles(Set.of(testAdminRole)); + + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + provider.initialize(conf, mock(PulsarResources.class)); + + 
AuthenticationDataSource ads = new AuthenticationDataSource() { + @Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; + + assertTrue(provider.isSuperUser(testAdminRole, ads, conf).get()); + Function> authorizeFunc = (String role) -> { + if (role.equals("admin1")) { + return CompletableFuture.completedFuture(true); + } + return CompletableFuture.completedFuture(false); + }; + assertTrue(provider.authorize(testAdminRole, ads, (String role) -> CompletableFuture.completedFuture(false)).get()); + assertTrue(provider.authorize("admin1", null, authorizeFunc).get()); + assertFalse(provider.authorize("admin2", null, authorizeFunc).get()); + } } diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java index 85f54a76dc3c4..deb86e1802f6f 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java @@ -18,12 +18,34 @@ */ package org.apache.pulsar.broker.resources; +import static org.apache.pulsar.broker.resources.BaseResources.joinPath; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; + +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.metadata.api.MetadataStore; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; public class NamespaceResourcesTest { + + private MetadataStore localStore; + private MetadataStore configurationStore; 
+ private NamespaceResources namespaceResources; + + private static final String BUNDLE_DATA_BASE_PATH = "/loadbalance/bundle-data"; + + @BeforeMethod + public void setup() { + localStore = mock(MetadataStore.class); + configurationStore = mock(MetadataStore.class); + namespaceResources = new NamespaceResources(localStore, configurationStore, 30); + } + @Test public void test_pathIsFromNamespace() { assertFalse(NamespaceResources.pathIsFromNamespace("/admin/clusters")); @@ -31,4 +53,26 @@ public void test_pathIsFromNamespace() { assertFalse(NamespaceResources.pathIsFromNamespace("/admin/policies/my-tenant")); assertTrue(NamespaceResources.pathIsFromNamespace("/admin/policies/my-tenant/my-ns")); } + + /** + * Test that the bundle-data node is deleted from the local stores. + */ + @Test + public void testDeleteBundleDataAsync() { + NamespaceName ns = NamespaceName.get("my-tenant/my-ns"); + String namespaceBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, ns.toString()); + namespaceResources.deleteBundleDataAsync(ns); + + String tenant="my-tenant"; + String tenantBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, tenant); + namespaceResources.deleteBundleDataTenantAsync(tenant); + + verify(localStore).deleteRecursive(namespaceBundlePath); + verify(localStore).deleteRecursive(tenantBundlePath); + + assertThrows(()-> verify(configurationStore).deleteRecursive(namespaceBundlePath)); + assertThrows(()-> verify(configurationStore).deleteRecursive(tenantBundlePath)); + } + + } \ No newline at end of file diff --git a/pulsar-broker/pom.xml b/pulsar-broker/pom.xml index 4c39d7c4b79d0..c86934fe56313 100644 --- a/pulsar-broker/pom.xml +++ b/pulsar-broker/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- pulsar-broker jar Pulsar Broker - commons-codec commons-codec - org.apache.commons commons-collections4 - org.apache.commons commons-lang3 - org.slf4j slf4j-api - io.netty netty-transport - com.google.protobuf protobuf-java - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-websocket ${project.version} - ${project.groupId} pulsar-client-admin-original ${project.version} - ${project.groupId} managed-ledger ${project.version} - org.apache.curator curator-recipes - org.apache.bookkeeper stream-storage-server @@ -119,170 +105,139 @@ - org.apache.bookkeeper bookkeeper-tools-framework - ${project.groupId} pulsar-broker-common ${project.version} - ${project.groupId} pulsar-transaction-common ${project.version} - ${project.groupId} pulsar-io-batch-discovery-triggerers ${project.version} test - ${project.groupId} testmocks ${project.version} test - com.github.tomakehurst wiremock-jre8 ${wiremock.version} test - - io.dropwizard.metrics - metrics-core + io.dropwizard.metrics + metrics-core - - org.xerial.snappy - snappy-java + org.xerial.snappy + snappy-java - - ${project.groupId} pulsar-functions-worker ${project.version} - ${project.groupId} pulsar-functions-local-runner-original ${project.version} test - ${project.groupId} pulsar-client-messagecrypto-bc ${project.version} - org.awaitility awaitility test - - org.eclipse.jetty jetty-server - org.eclipse.jetty jetty-alpn-conscrypt-server - org.eclipse.jetty jetty-servlet - org.eclipse.jetty jetty-servlets - org.glassfish.jersey.core jersey-server - org.glassfish.jersey.containers jersey-container-servlet-core - org.glassfish.jersey.containers jersey-container-servlet - org.glassfish.jersey.media jersey-media-json-jackson - org.glassfish.jersey.test-framework jersey-test-framework-core test ${jersey.version} - org.glassfish.jersey.test-framework.providers jersey-test-framework-provider-grizzly2 test ${jersey.version} - jakarta.activation jakarta.activation-api - 
com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - org.glassfish.jersey.inject jersey-hk2 - com.fasterxml.jackson.module jackson-module-jsonSchema - org.slf4j jcl-over-slf4j - com.google.guava guava - ${project.groupId} pulsar-docs-tools @@ -294,80 +249,65 @@ - com.beust jcommander - io.swagger swagger-annotations provided - io.prometheus simpleclient - io.prometheus simpleclient_jetty - io.prometheus simpleclient_hotspot - io.prometheus simpleclient_caffeine - io.swagger swagger-core provided - org.hdrhistogram HdrHistogram - com.google.code.gson gson - com.github.zafarkhaja java-semver - org.apache.avro avro ${avro.version} - com.carrotsearch hppc - org.roaringbitmap RoaringBitmap - com.github.oshi oshi-core-java11 - ${project.groupId} pulsar-functions-api-examples @@ -375,7 +315,6 @@ pom test - ${project.groupId} pulsar-functions-api-examples-builtin @@ -383,7 +322,6 @@ pom test - ${project.groupId} pulsar-io-batch-data-generator @@ -391,7 +329,6 @@ pom test - ${project.groupId} pulsar-io-data-generator @@ -399,7 +336,6 @@ pom test - ${project.groupId} pulsar-metadata @@ -407,7 +343,6 @@ test-jar test - javax.xml.bind jaxb-api @@ -418,42 +353,33 @@ - com.sun.activation javax.activation - - ${project.groupId} pulsar-transaction-coordinator ${project.version} - - ${project.groupId} pulsar-package-core ${project.version} - io.etcd jetcd-test test - ${project.groupId} pulsar-package-filesystem-storage ${project.version} - - @@ -476,7 +402,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -507,7 +432,6 @@ - maven-dependency-plugin @@ -560,7 +484,6 @@ - org.apache.maven.plugins maven-surefire-plugin @@ -576,12 +499,12 @@ - org.xolstice.maven.plugins protobuf-maven-plugin ${protobuf-maven-plugin.version} + com.google.protobuf:protoc:${protoc3.version}:exe:${os.detected.classifier} true @@ -645,7 +568,6 @@ - maven-resources-plugin @@ -676,7 +598,6 @@ - @@ -695,7 +616,6 @@ test-jar test - ${project.groupId} pulsar-package-core @@ -703,7 +623,6 @@ test-jar 
test - ${project.groupId} pulsar-metadata diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java index 0ecca75595603..e5293cee24e4a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java @@ -219,7 +219,7 @@ static void setDefaultEnsemblePlacementPolicy( } } - private void setEnsemblePlacementPolicy(ClientConfiguration bkConf, ServiceConfiguration conf, MetadataStore store, + static void setEnsemblePlacementPolicy(ClientConfiguration bkConf, ServiceConfiguration conf, MetadataStore store, Class policyClass) { bkConf.setEnsemblePlacementPolicy(policyClass); bkConf.setProperty(BookieRackAffinityMapping.METADATA_STORE_INSTANCE, store); @@ -227,6 +227,9 @@ private void setEnsemblePlacementPolicy(ClientConfiguration bkConf, ServiceConfi bkConf.setProperty(REPP_DNS_RESOLVER_CLASS, conf.getProperties().getProperty(REPP_DNS_RESOLVER_CLASS, BookieRackAffinityMapping.class.getName())); + bkConf.setMinNumRacksPerWriteQuorum(conf.getBookkeeperClientMinNumRacksPerWriteQuorum()); + bkConf.setEnforceMinNumRacksPerWriteQuorum(conf.isBookkeeperClientEnforceMinNumRacksPerWriteQuorum()); + bkConf.setProperty(NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, conf.getProperties().getProperty( NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java index 4ffb5b77d5424..64f4ee0288122 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java @@ -271,6 +271,7 @@ public class PulsarService implements AutoCloseable, ShutdownService { private TransactionPendingAckStoreProvider 
transactionPendingAckStoreProvider; private final ExecutorProvider transactionExecutorProvider; + private String brokerId; public enum State { Init, Started, Closing, Closed @@ -303,6 +304,7 @@ public PulsarService(ServiceConfiguration config, // Validate correctness of configuration PulsarConfigurationLoader.isComplete(config); TransactionBatchedWriteValidator.validate(config); + this.config = config; // validate `advertisedAddress`, `advertisedListeners`, `internalListenerName` this.advertisedListeners = MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); @@ -313,7 +315,6 @@ public PulsarService(ServiceConfiguration config, // use `internalListenerName` listener as `advertisedAddress` this.bindAddress = ServiceConfigurationUtils.getDefaultOrConfiguredAddress(config.getBindAddress()); this.brokerVersion = PulsarVersion.getVersion(); - this.config = config; this.processTerminator = processTerminator; this.loadManagerExecutor = Executors .newSingleThreadScheduledExecutor(new ExecutorProvider.ExtendedThreadFactory("pulsar-load-manager")); @@ -640,14 +641,18 @@ private synchronized void resetMetricsServlet() { } private CompletableFuture addTimeoutHandling(CompletableFuture future) { + long brokerShutdownTimeoutMs = getConfiguration().getBrokerShutdownTimeoutMs(); + if (brokerShutdownTimeoutMs <= 0) { + return future; + } ScheduledExecutorService shutdownExecutor = Executors.newSingleThreadScheduledExecutor( new ExecutorProvider.ExtendedThreadFactory(getClass().getSimpleName() + "-shutdown")); FutureUtil.addTimeoutHandling(future, - Duration.ofMillis(Math.max(1L, getConfiguration().getBrokerShutdownTimeoutMs())), + Duration.ofMillis(brokerShutdownTimeoutMs), shutdownExecutor, () -> FutureUtil.createTimeoutException("Timeout in close", getClass(), "close")); future.handle((v, t) -> { - if (t != null && getConfiguration().getBrokerShutdownTimeoutMs() > 0) { - LOG.info("Shutdown timed out after {} ms", 
getConfiguration().getBrokerShutdownTimeoutMs()); + if (t instanceof TimeoutException) { + LOG.info("Shutdown timed out after {} ms", brokerShutdownTimeoutMs); LOG.info(ThreadDumpUtil.buildThreadDiagnosticString()); } // shutdown the shutdown executor @@ -824,6 +829,12 @@ public void start() throws PulsarServerException { this.brokerServiceUrl = brokerUrl(config); this.brokerServiceUrlTls = brokerUrlTls(config); + // the broker id is used in the load manager to identify the broker + // it should not be used for making connections to the broker + this.brokerId = + String.format("%s:%s", advertisedAddress, config.getWebServicePort() + .or(config::getWebServicePortTls).orElseThrow()); + if (this.compactionServiceFactory == null) { this.compactionServiceFactory = loadCompactionServiceFactory(); } @@ -1090,7 +1101,7 @@ private void addWebSocketServiceHandler(WebService webService, } private void handleDeleteCluster(Notification notification) { - if (ClusterResources.pathRepresentsClusterName(notification.getPath()) + if (isRunning() && ClusterResources.pathRepresentsClusterName(notification.getPath()) && notification.getType() == NotificationType.Deleted) { final String clusterName = ClusterResources.clusterNameFromPath(notification.getPath()); getBrokerService().closeAndRemoveReplicationClient(clusterName); @@ -1128,7 +1139,8 @@ protected void startLeaderElectionService() { LOG.info("The load manager extension is enabled. 
Skipping PulsarService LeaderElectionService."); return; } - this.leaderElectionService = new LeaderElectionService(coordinationService, getSafeWebServiceAddress(), + this.leaderElectionService = + new LeaderElectionService(coordinationService, getBrokerId(), getSafeWebServiceAddress(), state -> { if (state == LeaderElectionState.Leading) { LOG.info("This broker was elected leader"); @@ -1176,7 +1188,7 @@ protected void startLeaderElectionService() { protected void acquireSLANamespace() { try { // Namespace not created hence no need to unload it - NamespaceName nsName = NamespaceService.getSLAMonitorNamespace(getAdvertisedAddress(), config); + NamespaceName nsName = NamespaceService.getSLAMonitorNamespace(getBrokerId(), config); if (!this.pulsarResources.getNamespaceResources().namespaceExists(nsName)) { LOG.info("SLA Namespace = {} doesn't exist.", nsName); return; @@ -1729,18 +1741,24 @@ public static String webAddressTls(String host, int port) { } public String getSafeWebServiceAddress() { - return webServiceAddress != null ? webServiceAddress : webServiceAddressTls; + return webServiceAddressTls != null ? webServiceAddressTls : webServiceAddress; } @Deprecated public String getSafeBrokerServiceUrl() { - return brokerServiceUrl != null ? brokerServiceUrl : brokerServiceUrlTls; + return brokerServiceUrlTls != null ? brokerServiceUrlTls : brokerServiceUrl; } - public String getLookupServiceAddress() { - return String.format("%s:%s", advertisedAddress, config.getWebServicePort().isPresent() - ? config.getWebServicePort().get() - : config.getWebServicePortTls().orElseThrow()); + /** + * Return the broker id. The broker id is used in the load manager to uniquely identify the broker at runtime. + * It should not be used for making connections to the broker. The broker id is available after {@link #start()} + * has been called. 
+ * + * @return broker id + */ + public String getBrokerId() { + return Objects.requireNonNull(brokerId, + "brokerId is not initialized before start has been called"); } public TopicPoliciesService getTopicPoliciesService() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java index 828b18c2df9ab..7de426fa1f02f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java @@ -831,6 +831,10 @@ protected static boolean isNotFoundException(Throwable ex) { == Status.NOT_FOUND.getStatusCode(); } + protected static boolean isNot307And404Exception(Throwable ex) { + return !isRedirectException(ex) && !isNotFoundException(ex); + } + protected static String getTopicNotFoundErrorMessage(String topic) { return String.format("Topic %s not found", topic); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java index be8390f15f826..61b354610ac20 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java @@ -26,6 +26,7 @@ import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -34,6 +35,7 @@ import java.util.Objects; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import javax.ws.rs.DELETE; import javax.ws.rs.DefaultValue; @@ -80,12 +82,18 @@ public class BrokersBase extends AdminResource { // log a full thread dump when a 
deadlock is detected in healthcheck once every 10 minutes // to prevent excessive logging private static final long LOG_THREADDUMP_INTERVAL_WHEN_DEADLOCK_DETECTED = 600000L; + // there is a timeout of 60 seconds default in the client(readTimeoutMs), so we need to set the timeout + // a bit shorter than 60 seconds to avoid the client timeout exception thrown before the server timeout exception. + // or we can't propagate the server timeout exception to the client. + private static final Duration HEALTH_CHECK_READ_TIMEOUT = Duration.ofSeconds(58); + private static final TimeoutException HEALTH_CHECK_TIMEOUT_EXCEPTION = + FutureUtil.createTimeoutException("Timeout", BrokersBase.class, "healthCheckRecursiveReadNext(...)"); private volatile long threadDumpLoggedTimestamp; @GET @Path("/{cluster}") @ApiOperation( - value = "Get the list of active brokers (web service addresses) in the cluster." + value = "Get the list of active brokers (broker ids) in the cluster." + "If authorization is not enabled, any cluster name is valid.", response = String.class, responseContainer = "Set") @@ -115,7 +123,7 @@ public void getActiveBrokers(@Suspended final AsyncResponse asyncResponse, @GET @ApiOperation( - value = "Get the list of active brokers (web service addresses) in the local cluster." + value = "Get the list of active brokers (broker ids) in the local cluster." 
+ "If authorization is not enabled", response = String.class, responseContainer = "Set") @@ -141,7 +149,9 @@ public void getLeaderBroker(@Suspended final AsyncResponse asyncResponse) { validateSuperUserAccessAsync().thenAccept(__ -> { LeaderBroker leaderBroker = pulsar().getLeaderElectionService().getCurrentLeader() .orElseThrow(() -> new RestException(Status.NOT_FOUND, "Couldn't find leader broker")); - BrokerInfo brokerInfo = BrokerInfo.builder().serviceUrl(leaderBroker.getServiceUrl()).build(); + BrokerInfo brokerInfo = BrokerInfo.builder() + .serviceUrl(leaderBroker.getServiceUrl()) + .brokerId(leaderBroker.getBrokerId()).build(); LOG.info("[{}] Successfully to get the information of the leader broker.", clientAppId()); asyncResponse.resume(brokerInfo); }) @@ -153,8 +163,8 @@ public void getLeaderBroker(@Suspended final AsyncResponse asyncResponse) { } @GET - @Path("/{clusterName}/{broker-webserviceurl}/ownedNamespaces") - @ApiOperation(value = "Get the list of namespaces served by the specific broker", + @Path("/{clusterName}/{brokerId}/ownedNamespaces") + @ApiOperation(value = "Get the list of namespaces served by the specific broker id", response = NamespaceOwnershipStatus.class, responseContainer = "Map") @ApiResponses(value = { @ApiResponse(code = 307, message = "Current broker doesn't serve the cluster"), @@ -162,9 +172,9 @@ public void getLeaderBroker(@Suspended final AsyncResponse asyncResponse) { @ApiResponse(code = 404, message = "Cluster doesn't exist") }) public void getOwnedNamespaces(@Suspended final AsyncResponse asyncResponse, @PathParam("clusterName") String cluster, - @PathParam("broker-webserviceurl") String broker) { + @PathParam("brokerId") String brokerId) { validateSuperUserAccessAsync() - .thenAccept(__ -> validateBrokerName(broker)) + .thenCompose(__ -> maybeRedirectToBroker(brokerId)) .thenCompose(__ -> validateClusterOwnershipAsync(cluster)) .thenCompose(__ -> pulsar().getNamespaceService().getOwnedNameSpacesStatusAsync()) 
.thenAccept(asyncResponse::resume) @@ -172,7 +182,7 @@ public void getOwnedNamespaces(@Suspended final AsyncResponse asyncResponse, // If the exception is not redirect exception we need to log it. if (!isRedirectException(ex)) { LOG.error("[{}] Failed to get the namespace ownership status. cluster={}, broker={}", - clientAppId(), cluster, broker); + clientAppId(), cluster, brokerId); } resumeAsyncResponseExceptionally(asyncResponse, ex); return null; @@ -396,9 +406,10 @@ private void checkDeadlockedThreads() { private CompletableFuture internalRunHealthCheck(TopicVersion topicVersion) { + String brokerId = pulsar().getBrokerId(); NamespaceName namespaceName = (topicVersion == TopicVersion.V2) - ? NamespaceService.getHeartbeatNamespaceV2(pulsar().getAdvertisedAddress(), pulsar().getConfiguration()) - : NamespaceService.getHeartbeatNamespace(pulsar().getAdvertisedAddress(), pulsar().getConfiguration()); + ? NamespaceService.getHeartbeatNamespaceV2(brokerId, pulsar().getConfiguration()) + : NamespaceService.getHeartbeatNamespace(brokerId, pulsar().getConfiguration()); final String topicName = String.format("persistent://%s/%s", namespaceName, HEALTH_CHECK_TOPIC_SUFFIX); LOG.info("[{}] Running healthCheck with topic={}", clientAppId(), topicName); final String messageStr = UUID.randomUUID().toString(); @@ -431,7 +442,10 @@ private CompletableFuture internalRunHealthCheck(TopicVersion topicVersion }); throw FutureUtil.wrapToCompletionException(createException); }).thenCompose(reader -> producer.sendAsync(messageStr) - .thenCompose(__ -> healthCheckRecursiveReadNext(reader, messageStr)) + .thenCompose(__ -> FutureUtil.addTimeoutHandling( + healthCheckRecursiveReadNext(reader, messageStr), + HEALTH_CHECK_READ_TIMEOUT, pulsar().getBrokerService().executor(), + () -> HEALTH_CHECK_TIMEOUT_EXCEPTION)) .whenComplete((__, ex) -> { closeAndReCheck(producer, reader, topicOptional.get(), subscriptionName) .whenComplete((unused, innerEx) -> { diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java index 789a8e6dbdcb7..144545abb2cd1 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java @@ -279,12 +279,6 @@ private void internalRetryableDeleteNamespaceAsync0(boolean force, int retryTime return old; }); } - allUserCreatedTopics.removeIf(t -> - allPartitionedTopics.contains(TopicName.get(t).getPartitionedTopicName())); - allSystemTopics.removeIf(t -> - allPartitionedTopics.contains(TopicName.get(t).getPartitionedTopicName())); - topicPolicy.removeIf(t -> - allPartitionedTopics.contains(TopicName.get(t).getPartitionedTopicName())); return markDeleteFuture.thenCompose(__ -> internalDeleteTopicsAsync(allUserCreatedTopics)) .thenCompose(ignore -> @@ -673,7 +667,9 @@ protected CompletableFuture internalSetNamespaceReplicationClusters(List validatePoliciesReadOnlyAccessAsync()) .thenApply(__ -> { - checkNotNull(clusterIds, "ClusterIds should not be null"); + if (CollectionUtils.isEmpty(clusterIds)) { + throw new RestException(Status.PRECONDITION_FAILED, "ClusterIds should not be null or empty"); + } if (!namespaceName.isGlobal() && !(clusterIds.size() == 1 && clusterIds.get(0).equals(pulsar().getConfiguration().getClusterName()))) { throw new RestException(Status.PRECONDITION_FAILED, @@ -926,9 +922,9 @@ private CompletableFuture validateLeaderBrokerAsync() { return FutureUtil.failedFuture(new RestException(Response.Status.PRECONDITION_FAILED, errorStr)); } LeaderBroker leaderBroker = pulsar().getLeaderElectionService().getCurrentLeader().get(); - String leaderBrokerUrl = leaderBroker.getServiceUrl(); + String leaderBrokerId = leaderBroker.getBrokerId(); return pulsar().getNamespaceService() - .createLookupResult(leaderBrokerUrl, false, null) + 
.createLookupResult(leaderBrokerId, false, null) .thenCompose(lookupResult -> { String redirectUrl = isRequestHttps() ? lookupResult.getLookupData().getHttpUrlTls() : lookupResult.getLookupData().getHttpUrl(); @@ -951,7 +947,7 @@ private CompletableFuture validateLeaderBrokerAsync() { return FutureUtil.failedFuture(( new WebApplicationException(Response.temporaryRedirect(redirect).build()))); } catch (MalformedURLException exception) { - log.error("The leader broker url is malformed - {}", leaderBrokerUrl); + log.error("The redirect url is malformed - {}", redirectUrl); return FutureUtil.failedFuture(new RestException(exception)); } }); @@ -987,8 +983,11 @@ public CompletableFuture setNamespaceBundleAffinityAsync(String bundleRang } public CompletableFuture internalUnloadNamespaceBundleAsync(String bundleRange, - String destinationBroker, + String destinationBrokerParam, boolean authoritative) { + String destinationBroker = StringUtils.isBlank(destinationBrokerParam) ? null : + // ensure backward compatibility: strip the possible http:// or https:// prefix + destinationBrokerParam.replaceFirst("http[s]?://", ""); return validateSuperUserAccessAsync() .thenCompose(__ -> setNamespaceBundleAffinityAsync(bundleRange, destinationBroker)) .thenAccept(__ -> { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java index 682eb7b12e97e..c5a8d83f9c573 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java @@ -46,6 +46,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.IntStream; import 
javax.annotation.Nonnull; @@ -147,6 +148,8 @@ import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.BitSetRecyclable; +import org.apache.pulsar.compaction.Compactor; +import org.apache.pulsar.compaction.PulsarTopicCompactionService; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.slf4j.Logger; @@ -721,7 +724,9 @@ protected void internalDeletePartitionedTopic(AsyncResponse asyncResponse, .thenCompose(unused -> internalRemovePartitionsTopicAsync(numPartitions, force)); }) // Only tries to delete the znode for partitioned topic when all its partitions are successfully deleted - ).thenCompose(__ -> getPulsarResources().getNamespaceResources().getPartitionedTopicResources() + ).thenCompose(ignore -> + pulsar().getBrokerService().deleteSchema(topicName).exceptionally(ex -> null)) + .thenCompose(__ -> getPulsarResources().getNamespaceResources().getPartitionedTopicResources() .runWithMarkDeleteAsync(topicName, () -> namespaceResources() .getPartitionedTopicResources().deletePartitionedTopicAsync(topicName))) .thenAccept(__ -> { @@ -882,7 +887,7 @@ protected void internalUnloadTopic(AsyncResponse asyncResponse, boolean authorit } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get partitioned metadata while unloading topic {}", clientAppId(), topicName, ex); } @@ -892,7 +897,7 @@ protected void internalUnloadTopic(AsyncResponse asyncResponse, boolean authorit } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to validate the global namespace ownership while unloading topic {}", clientAppId(), topicName, ex); } @@ -1060,7 +1065,7 @@ private void internalUnloadNonPartitionedTopicAsync(AsyncResponse asyncResponse, })) .exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to unload topic {}, {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1082,7 +1087,7 @@ private void internalUnloadTransactionCoordinatorAsync(AsyncResponse asyncRespon })) .exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to unload tc {},{}", clientAppId(), topicName.getPartitionIndex(), ex); } @@ -1184,7 +1189,7 @@ protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean aut } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get partitioned topic metadata while get" + " subscriptions for topic {}", clientAppId(), topicName, ex); } @@ -1194,7 +1199,7 @@ protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean aut } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to validate the global namespace/topic ownership while get subscriptions" + " for topic {}", clientAppId(), topicName, ex); } @@ -1203,7 +1208,7 @@ protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean aut }) ).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get subscriptions for {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1242,7 +1247,7 @@ private void internalGetSubscriptionsForNonPartitionedTopic(AsyncResponse asyncR .thenAccept(topic -> asyncResponse.resume(new ArrayList<>(topic.getSubscriptions().keys()))) .exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get list of subscriptions for {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1354,7 +1359,7 @@ protected void internalGetManagedLedgerInfo(AsyncResponse asyncResponse, boolean } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get partitioned metadata while get managed info for {}", clientAppId(), topicName, ex); } @@ -1364,7 +1369,7 @@ protected void internalGetManagedLedgerInfo(AsyncResponse asyncResponse, boolean } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to validate the global namespace ownership while get managed info for {}", clientAppId(), topicName, ex); } @@ -1478,7 +1483,7 @@ protected void internalGetPartitionedStats(AsyncResponse asyncResponse, boolean }); }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get partitioned internal stats for {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1533,7 +1538,7 @@ protected void internalGetPartitionedStatsInternal(AsyncResponse asyncResponse, }); }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get partitioned internal stats for {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1661,7 +1666,7 @@ private void internalAnalyzeSubscriptionBacklogForNonPartitionedTopic(AsyncRespo }).exceptionally(ex -> { Throwable cause = ex.getCause(); // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to analyze subscription backlog {} {}", clientAppId(), topicName, subName, cause); } @@ -1688,7 +1693,7 @@ private void internalUpdateSubscriptionPropertiesForNonPartitionedTopic(AsyncRes }).exceptionally(ex -> { Throwable cause = ex.getCause(); // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to update subscription {} {}", clientAppId(), topicName, subName, cause); } asyncResponse.resume(new RestException(cause)); @@ -1717,7 +1722,7 @@ private void internalGetSubscriptionPropertiesForNonPartitionedTopic(AsyncRespon }).exceptionally(ex -> { Throwable cause = ex.getCause(); // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to update subscription {} {}", clientAppId(), topicName, subName, cause); } asyncResponse.resume(new RestException(cause)); @@ -1886,7 +1891,7 @@ protected void internalSkipAllMessages(AsyncResponse asyncResponse, String subNa } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to skip all messages for subscription {} on topic {}", clientAppId(), subName, topicName, ex); } @@ -1930,7 +1935,7 @@ private CompletableFuture internalSkipAllMessagesForNonPartitionedTopicAsy } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to skip all messages for subscription {} on topic {}", clientAppId(), subName, topicName, ex); } @@ -1994,7 +1999,7 @@ protected void internalSkipMessages(AsyncResponse asyncResponse, String subName, }) ).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to skip {} messages {} {}", clientAppId(), numMessages, topicName, subName, ex); } @@ -2064,7 +2069,7 @@ protected void internalExpireMessagesForAllSubscriptions(AsyncResponse asyncResp ) ).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to expire messages for all subscription on topic {}", clientAppId(), topicName, ex); } @@ -2097,10 +2102,9 @@ private void internalExpireMessagesForAllSubscriptionsForNonPartitionedTopic(Asy final List> futures = new ArrayList<>((int) topic.getReplicators().size()); List subNames = - new ArrayList<>((int) topic.getReplicators().size() - + (int) topic.getSubscriptions().size()); - subNames.addAll(topic.getReplicators().keys()); - subNames.addAll(topic.getSubscriptions().keys()); + new ArrayList<>((int) topic.getSubscriptions().size()); + subNames.addAll(topic.getSubscriptions().keys().stream().filter( + subName -> !subName.equals(Compactor.COMPACTION_SUBSCRIPTION)).toList()); for (int i = 0; i < subNames.size(); i++) { try { futures.add(internalExpireMessagesByTimestampForSinglePartitionAsync(partitionMetadata, @@ -2132,7 +2136,7 @@ private void internalExpireMessagesForAllSubscriptionsForNonPartitionedTopic(Asy }) ).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to expire messages for all subscription up to {} on {}", clientAppId(), expireTimeInSeconds, topicName, ex); } @@ -2339,7 +2343,7 @@ protected void internalCreateSubscription(AsyncResponse asyncResponse, String su })).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to create subscription {} on topic {}", clientAppId(), subscriptionName, topicName, ex); } @@ -2349,7 +2353,7 @@ protected void internalCreateSubscription(AsyncResponse asyncResponse, String su } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to create subscription {} on topic {}", clientAppId(), subscriptionName, topicName, ex); } @@ -2480,7 +2484,7 @@ protected void internalUpdateSubscriptionProperties(AsyncResponse asyncResponse, } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to update subscription {} from topic {}", clientAppId(), subName, topicName, ex); } @@ -2520,7 +2524,7 @@ protected void internalAnalyzeSubscriptionBacklog(AsyncResponse asyncResponse, S }) .exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to analyze back log of subscription {} from topic {}", clientAppId(), subName, topicName, ex); } @@ -2605,7 +2609,7 @@ protected void internalGetSubscriptionProperties(AsyncResponse asyncResponse, St } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to update subscription {} from topic {}", clientAppId(), subName, topicName, ex); } @@ -2691,7 +2695,7 @@ protected void internalResetCursorOnPosition(AsyncResponse asyncResponse, String }); }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.warn("[{}][{}] Failed to reset cursor on subscription {} to position {}", clientAppId(), topicName, subName, messageId, ex.getCause()); } @@ -2700,7 +2704,7 @@ protected void internalResetCursorOnPosition(AsyncResponse asyncResponse, String }); }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.warn("[{}][{}] Failed to reset cursor on subscription {} to position {}", clientAppId(), topicName, subName, messageId, ex.getCause()); } @@ -2827,6 +2831,9 @@ protected CompletableFuture internalGetMessageById(long ledgerId, long @Override public void readEntryFailed(ManagedLedgerException exception, Object ctx) { + if (exception instanceof ManagedLedgerException.LedgerNotExistException) { + throw new RestException(Status.NOT_FOUND, "Message id not found"); + } throw new RestException(exception); } @@ -2877,28 +2884,68 @@ protected CompletableFuture internalGetMessageIdByTimestampAsync(long throw new RestException(Status.METHOD_NOT_ALLOWED, "Get message ID by timestamp on a non-persistent topic is not allowed"); } - ManagedLedger ledger = ((PersistentTopic) topic).getManagedLedger(); - return ledger.asyncFindPosition(entry -> { + final PersistentTopic persistentTopic = (PersistentTopic) topic; + + return persistentTopic.getTopicCompactionService().readLastCompactedEntry().thenCompose(lastEntry -> { + if (lastEntry == null) { + return findMessageIdByPublishTime(timestamp, persistentTopic.getManagedLedger()); + } + MessageMetadata metadata; + Position position = lastEntry.getPosition(); try { - long entryTimestamp = Commands.getEntryTimestamp(entry.getDataBuffer()); - return MessageImpl.isEntryPublishedEarlierThan(entryTimestamp, timestamp); - } catch (Exception e) { - log.error("[{}] Error deserializing message for message position find", topicName, e); + metadata = Commands.parseMessageMetadata(lastEntry.getDataBuffer()); } finally { - entry.release(); + lastEntry.release(); } - return false; - }).thenApply(position -> { - if (position == null) { - return null; + if (timestamp == metadata.getPublishTime()) { + return CompletableFuture.completedFuture(new MessageIdImpl(position.getLedgerId(), + position.getEntryId(), topicName.getPartitionIndex())); + } else if (timestamp < 
metadata.getPublishTime() + && persistentTopic.getTopicCompactionService() instanceof PulsarTopicCompactionService) { + var compactedTopic = + ((PulsarTopicCompactionService) persistentTopic.getTopicCompactionService()) + .getCompactedTopic(); + final Predicate predicate = entry -> + Commands.parseMessageMetadata(entry.getDataBuffer()).getPublishTime() >= timestamp; + return compactedTopic.findFirstMatchEntry(predicate) + .thenApply(compactedEntry -> { + try { + return new MessageIdImpl(compactedEntry.getLedgerId(), + compactedEntry.getEntryId(), topicName.getPartitionIndex()); + } finally { + compactedEntry.release(); + } + }); } else { - return new MessageIdImpl(position.getLedgerId(), position.getEntryId(), - topicName.getPartitionIndex()); + return findMessageIdByPublishTime(timestamp, persistentTopic.getManagedLedger()); } }); }); } + private CompletableFuture findMessageIdByPublishTime(long timestamp, ManagedLedger managedLedger) { + return managedLedger.asyncFindPosition(entry -> { + try { + long entryTimestamp = Commands.getEntryTimestamp(entry.getDataBuffer()); + return MessageImpl.isEntryPublishedEarlierThan(entryTimestamp, timestamp); + } catch (Exception e) { + log.error("[{}] Error deserializing message for message position find", + topicName, + e); + } finally { + entry.release(); + } + return false; + }).thenApply(position -> { + if (position == null) { + return null; + } else { + return new MessageIdImpl(position.getLedgerId(), position.getEntryId(), + topicName.getPartitionIndex()); + } + }); + } + protected CompletableFuture internalPeekNthMessageAsync(String subName, int messagePosition, boolean authoritative) { CompletableFuture ret; @@ -3281,7 +3328,7 @@ protected void internalGetBacklogSizeByMessageId(AsyncResponse asyncResponse, } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get backlog size for topic {}", clientAppId(), topicName, ex); } @@ -3289,7 +3336,7 @@ protected void internalGetBacklogSizeByMessageId(AsyncResponse asyncResponse, return null; })).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to validate global namespace ownership " + "to get backlog size for topic {}", clientAppId(), topicName, ex); } @@ -3349,6 +3396,9 @@ protected CompletableFuture internalSetReplicationClusters(List cl return validateTopicPolicyOperationAsync(topicName, PolicyName.REPLICATION, PolicyOperation.WRITE) .thenCompose(__ -> validatePoliciesReadOnlyAccessAsync()) .thenCompose(__ -> { + if (CollectionUtils.isEmpty(clusterIds)) { + throw new RestException(Status.PRECONDITION_FAILED, "ClusterIds should not be null or empty"); + } Set replicationClusters = Sets.newHashSet(clusterIds); if (replicationClusters.contains("global")) { throw new RestException(Status.PRECONDITION_FAILED, @@ -3824,7 +3874,7 @@ protected void internalTerminatePartitionedTopic(AsyncResponse asyncResponse, bo } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to terminate topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -3832,7 +3882,7 @@ protected void internalTerminatePartitionedTopic(AsyncResponse asyncResponse, bo }) ).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to terminate topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -3922,7 +3972,7 @@ protected void internalExpireMessagesByTimestamp(AsyncResponse asyncResponse, St ).exceptionally(ex -> { Throwable cause = FutureUtil.unwrapCompletionException(ex); // If the exception is not redirect exception we need to log it. - if (!isRedirectException(cause)) { + if (!isNot307And404Exception(cause)) { if (cause instanceof RestException) { log.warn("[{}] Failed to expire messages up to {} on {}: {}", clientAppId(), expireTimeInSeconds, topicName, cause.toString()); @@ -4037,7 +4087,7 @@ protected void internalExpireMessagesByPosition(AsyncResponse asyncResponse, Str messageId, isExcluded, batchIndex); }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to expire messages up to {} on subscription {} to position {}", clientAppId(), topicName, subName, messageId, ex); } @@ -4187,7 +4237,7 @@ protected void internalTriggerCompaction(AsyncResponse asyncResponse, boolean au } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to trigger compaction on topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -4196,7 +4246,7 @@ protected void internalTriggerCompaction(AsyncResponse asyncResponse, boolean au } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to validate global namespace ownership to trigger compaction on topic {}", clientAppId(), topicName, ex); } @@ -4225,7 +4275,7 @@ protected void internalTriggerCompactionNonPartitionedTopic(AsyncResponse asyncR } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to trigger compaction for {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -4261,7 +4311,7 @@ protected void internalTriggerOffload(AsyncResponse asyncResponse, } }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to trigger offload for {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -4278,7 +4328,7 @@ protected void internalOffloadStatus(AsyncResponse asyncResponse, boolean author asyncResponse.resume(offloadProcessStatus); }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to offload status on topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -4603,7 +4653,7 @@ protected void internalGetLastMessageId(AsyncResponse asyncResponse, boolean aut }); }).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get last messageId {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -4981,9 +5031,7 @@ protected CompletableFuture internalRemoveSubscribeRate(boolean isGlobal) protected void handleTopicPolicyException(String methodName, Throwable thr, AsyncResponse asyncResponse) { Throwable cause = thr.getCause(); - if (!(cause instanceof WebApplicationException) || !( - ((WebApplicationException) cause).getResponse().getStatus() == 307 - || ((WebApplicationException) cause).getResponse().getStatus() == 404)) { + if (isNot307And404Exception(cause)) { log.error("[{}] Failed to perform {} on topic {}", clientAppId(), methodName, topicName, cause); } @@ -5109,7 +5157,7 @@ protected void internalSetReplicatedSubscriptionStatus(AsyncResponse asyncRespon resultFuture.exceptionally(ex -> { // If the exception is not redirect exception we need to log it. - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.warn("[{}] Failed to change replicated subscription status to {} - {} {}", clientAppId(), enabled, topicName, subName, ex); } @@ -5156,7 +5204,7 @@ private void internalSetReplicatedSubscriptionStatusForNonPartitionedTopic( } ).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to set replicated subscription status on {} {}", clientAppId(), topicName, subName, ex); } @@ -5257,7 +5305,7 @@ protected void internalGetReplicatedSubscriptionStatus(AsyncResponse asyncRespon } resultFuture.exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get replicated subscription status on {} {}", clientAppId(), topicName, subName, ex); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java index 0bab772044a6d..286366c8b5834 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java @@ -34,6 +34,7 @@ import org.apache.pulsar.broker.service.schema.SchemaRegistry.SchemaAndMetadata; import org.apache.pulsar.broker.service.schema.SchemaRegistryService; import org.apache.pulsar.broker.web.RestException; +import org.apache.pulsar.client.impl.schema.SchemaUtils; import org.apache.pulsar.client.internal.DefaultImplementation; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; @@ -146,8 +147,13 @@ public CompletableFuture> testCompati .thenCompose(__ -> getSchemaCompatibilityStrategyAsync()) .thenCompose(strategy -> { String schemaId = getSchemaId(); + final SchemaType schemaType = SchemaType.valueOf(payload.getType()); + byte[] data = payload.getSchema().getBytes(StandardCharsets.UTF_8); + if (schemaType.getValue() == SchemaType.KEY_VALUE.getValue()) { + data = SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema(data); + } return pulsar().getSchemaRegistryService().isCompatible(schemaId, - 
SchemaData.builder().data(payload.getSchema().getBytes(StandardCharsets.UTF_8)) + SchemaData.builder().data(data) .isDeleted(false) .timestamp(clock.millis()).type(SchemaType.valueOf(payload.getType())) .user(defaultIfEmpty(clientAppId(), "")) @@ -161,10 +167,14 @@ public CompletableFuture getVersionBySchemaAsync(PostSchemaPayload payload return validateOwnershipAndOperationAsync(authoritative, TopicOperation.GET_METADATA) .thenCompose(__ -> { String schemaId = getSchemaId(); + final SchemaType schemaType = SchemaType.valueOf(payload.getType()); + byte[] data = payload.getSchema().getBytes(StandardCharsets.UTF_8); + if (schemaType.getValue() == SchemaType.KEY_VALUE.getValue()) { + data = SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema(data); + } return pulsar().getSchemaRegistryService() .findSchemaVersion(schemaId, - SchemaData.builder().data(payload.getSchema().getBytes(StandardCharsets.UTF_8)) - .isDeleted(false).timestamp(clock.millis()) + SchemaData.builder().data(data).isDeleted(false).timestamp(clock.millis()) .type(SchemaType.valueOf(payload.getType())) .user(defaultIfEmpty(clientAppId(), "")) .props(payload.getProperties()).build()); @@ -228,7 +238,7 @@ private CompletableFuture validateOwnershipAndOperationAsync(boolean autho protected boolean shouldPrintErrorLog(Throwable ex) { - return !isRedirectException(ex) && !isNotFoundException(ex); + return isNot307And404Exception(ex); } private static final Logger log = LoggerFactory.getLogger(SchemasResourceBase.class); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java index 3921334cff30a..470cdc3e74ba1 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java @@ -501,11 +501,13 @@ protected CompletableFuture 
getExistingPersistentTopicAsync(boo CompletableFuture> topicFuture = pulsar().getBrokerService() .getTopics().get(topicName.toString()); if (topicFuture == null) { - return FutureUtil.failedFuture(new RestException(NOT_FOUND, "Topic not found")); + return FutureUtil.failedFuture(new RestException(NOT_FOUND, + String.format("Topic not found %s", topicName.toString()))); } return topicFuture.thenCompose(optionalTopic -> { if (!optionalTopic.isPresent()) { - return FutureUtil.failedFuture(new RestException(NOT_FOUND, "Topic not found")); + return FutureUtil.failedFuture(new RestException(NOT_FOUND, + String.format("Topic not found %s", topicName.toString()))); } return CompletableFuture.completedFuture((PersistentTopic) optionalTopic.get()); }); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java index 0d857f2211f41..1c1dd74719641 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java @@ -284,11 +284,12 @@ public List getListFromBundle(@PathParam("property") String property, @P } } - private Topic getTopicReference(TopicName topicName) { + private Topic getTopicReference(final TopicName topicName) { try { return pulsar().getBrokerService().getTopicIfExists(topicName.toString()) .get(config().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS) - .orElseThrow(() -> new RestException(Status.NOT_FOUND, "Topic not found")); + .orElseThrow(() -> new RestException(Status.NOT_FOUND, + String.format("Topic not found %s", topicName.toString()))); } catch (ExecutionException e) { throw new RuntimeException(e.getCause()); } catch (InterruptedException | TimeoutException e) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java index c360eeabb5838..e0a1b8f89ee3f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java @@ -132,7 +132,7 @@ public void getInternalStats( }) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get internal stats for topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -233,7 +233,8 @@ public void getPartitionedStats( getPartitionedTopicMetadataAsync(topicName, authoritative, false).thenAccept(partitionMetadata -> { if (partitionMetadata.partitions == 0) { - asyncResponse.resume(new RestException(Status.NOT_FOUND, "Partitioned Topic not found")); + asyncResponse.resume(new RestException(Status.NOT_FOUND, + String.format("Partitioned topic not found %s", topicName.toString()))); return; } NonPersistentPartitionedTopicStatsImpl stats = @@ -467,7 +468,7 @@ public void getListFromBundle( } asyncResponse.resume(topicList); }).exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to list topics on namespace bundle {}/{}", clientAppId(), namespaceName, bundleRange, ex); } @@ -476,7 +477,7 @@ public void getListFromBundle( }); } }).exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to list topics on namespace bundle {}/{}", clientAppId(), namespaceName, bundleRange, ex); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java index 1927d4b244aa4..1fc46f9c872b9 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java @@ -117,7 +117,7 @@ public void getList( internalGetListAsync(Optional.ofNullable(bundle)) .thenAccept(topicList -> asyncResponse.resume(filterSystemTopic(topicList, includeSystemTopic))) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get topic list {}", clientAppId(), namespaceName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -148,7 +148,7 @@ public void getPartitionedTopicList( .thenAccept(partitionedTopicList -> asyncResponse.resume( filterSystemTopic(partitionedTopicList, includeSystemTopic))) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get partitioned topic list {}", clientAppId(), namespaceName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -333,7 +333,7 @@ public void createNonPartitionedTopic( internalCreateNonPartitionedTopicAsync(authoritative, properties) .thenAccept(__ -> asyncResponse.resume(Response.noContent().build())) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to create non-partitioned topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -822,7 +822,7 @@ public void updatePartitionedTopic( asyncResponse.resume(Response.noContent().build()); }) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}][{}] Failed to update partition to {}", clientAppId(), topicName, numPartitions, ex); } @@ -924,7 +924,7 @@ public void getProperties( internalGetPropertiesAsync(authoritative) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to 
get topic {} properties", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -960,7 +960,7 @@ public void updateProperties( internalUpdatePropertiesAsync(authoritative, properties) .thenAccept(__ -> asyncResponse.resume(Response.noContent().build())) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to update topic {} properties", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -994,7 +994,7 @@ public void removeProperties( internalRemovePropertiesAsync(authoritative, key) .thenAccept(__ -> asyncResponse.resume(Response.noContent().build())) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to remove key {} in properties on topic {}", clientAppId(), key, topicName, ex); } @@ -1115,7 +1115,7 @@ public void deleteTopic( } else if (isManagedLedgerNotFoundException(t)) { ex = new RestException(Response.Status.NOT_FOUND, getTopicNotFoundErrorMessage(topicName.toString())); - } else if (!isRedirectException(ex)) { + } else if (isNot307And404Exception(ex)) { log.error("[{}] Failed to delete topic {}", clientAppId(), topicName, t); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1192,7 +1192,7 @@ public void getStats( .thenAccept(asyncResponse::resume) .exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get stats for {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1226,7 +1226,7 @@ public void getInternalStats( internalGetInternalStatsAsync(authoritative, metadata) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get internal stats for topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -1870,7 +1870,7 @@ public void peekNthMessage( internalPeekNthMessageAsync(decode(encodedSubName), messagePosition, authoritative) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get peek nth message for topic {} subscription {}", clientAppId(), topicName, decode(encodedSubName), ex); } @@ -1912,7 +1912,7 @@ public void examineMessage( internalExamineMessageAsync(initialPosition, messagePosition, authoritative) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to examine a specific message on the topic {}", clientAppId(), topicName, ex); } @@ -1954,7 +1954,7 @@ public void getMessageById( .thenAccept(asyncResponse::resume) .exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get message with ledgerId {} entryId {} from {}", clientAppId(), ledgerId, entryId, topicName, ex); } @@ -1998,7 +1998,7 @@ public void getMessageIdByTimestamp( } }) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get message ID by timestamp {} from {}", clientAppId(), timestamp, topicName, ex); } @@ -2033,7 +2033,7 @@ public void getBacklog( log.warn("[{}] Failed to get topic backlog {}: Namespace does not exist", clientAppId(), namespaceName); ex = new RestException(Response.Status.NOT_FOUND, "Namespace does not exist"); - } else if (!isRedirectException(ex)) { + } else if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get estimated backlog for topic {}", clientAppId(), encodedTopic, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -2414,7 +2414,8 @@ public void getRetention(@Suspended final AsyncResponse asyncResponse, @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); - preValidation(authoritative) + validateTopicPolicyOperationAsync(topicName, PolicyName.RETENTION, PolicyOperation.READ) + .thenCompose(__ -> preValidation(authoritative)) .thenCompose(__ -> internalGetRetention(applied, isGlobal)) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { @@ -2441,7 +2442,8 @@ public void setRetention(@Suspended final AsyncResponse asyncResponse, @QueryParam("isGlobal") @DefaultValue("false") boolean isGlobal, @ApiParam(value = "Retention policies for the specified topic") RetentionPolicies retention) { validateTopicName(tenant, namespace, encodedTopic); - preValidation(authoritative) + validateTopicPolicyOperationAsync(topicName, PolicyName.RETENTION, PolicyOperation.WRITE) + .thenCompose(__ -> preValidation(authoritative)) .thenCompose(__ -> internalSetRetention(retention, isGlobal)) .thenRun(() -> { try { @@ -2477,7 +2479,8 @@ public void removeRetention(@Suspended final AsyncResponse asyncResponse, @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); - preValidation(authoritative) + validateTopicPolicyOperationAsync(topicName, PolicyName.RETENTION, PolicyOperation.WRITE) + .thenCompose(__ -> preValidation(authoritative)) .thenCompose(__ -> internalRemoveRetention(isGlobal)) .thenRun(() -> { log.info("[{}] Successfully remove retention: namespace={}, topic={}", @@ -3071,7 +3074,7 @@ public void terminate( internalTerminateAsync(authoritative) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to terminated topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); @@ -3167,7 +3170,7 @@ public void compactionStatus( internalCompactionStatusAsync(authoritative) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to get the status of a compaction operation for the topic {}", clientAppId(), topicName, ex); } @@ -3306,7 +3309,7 @@ public void trimTopic( validateTopicName(tenant, namespace, encodedTopic); internalTrimTopic(asyncResponse, authoritative).exceptionally(ex -> { // If the exception is not redirect exception we need to log it. 
- if (!isRedirectException(ex)) { + if (isNot307And404Exception(ex)) { log.error("[{}] Failed to trim topic {}", clientAppId(), topicName, ex); } resumeAsyncResponseExceptionally(asyncResponse, ex); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java index 1bdc2255085f1..667d8ce581ece 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java @@ -105,7 +105,7 @@ public void getTransactionInBufferStats(@Suspended final AsyncResponse asyncResp Long.parseLong(leastSigBits)) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get transaction state in transaction buffer {}", clientAppId(), topicName, ex); } @@ -143,7 +143,7 @@ public void getTransactionInPendingAckStats(@Suspended final AsyncResponse async Long.parseLong(leastSigBits), subName) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get transaction state in pending ack {}", clientAppId(), topicName, ex); } @@ -181,7 +181,7 @@ public void getTransactionBufferStats(@Suspended final AsyncResponse asyncRespon internalGetTransactionBufferStats(authoritative, lowWaterMarks, segmentStats) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get transaction buffer stats in topic {}", clientAppId(), topicName, ex); } @@ -217,7 +217,7 @@ public void getPendingAckStats(@Suspended final AsyncResponse asyncResponse, internalGetPendingAckStats(authoritative, subName, lowWaterMarks) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if 
(!isNot307And404Exception(ex)) { log.error("[{}] Failed to get transaction pending ack stats in topic {}", clientAppId(), topicName, ex); } @@ -314,7 +314,7 @@ public void getPendingAckInternalStats(@Suspended final AsyncResponse asyncRespo internalGetPendingAckInternalStats(authoritative, subName, metadata) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get pending ack internal stats {}", clientAppId(), topicName, ex); } @@ -365,7 +365,7 @@ public void getTransactionBufferInternalStats(@Suspended final AsyncResponse asy internalGetTransactionBufferInternalStats(authoritative, metadata) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { - if (!isRedirectException(ex)) { + if (!isNot307And404Exception(ex)) { log.error("[{}] Failed to get transaction buffer internal stats {}", clientAppId(), topicName, ex); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java index 6a00bfd199584..33076fd51a8e9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/BucketDelayedDeliveryTrackerFactory.java @@ -27,6 +27,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.apache.bookkeeper.mledger.ManagedCursor; +import org.apache.commons.collections4.MapUtils; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.delayed.bucket.BookkeeperBucketSnapshotStorage; @@ -85,6 +86,9 @@ public DelayedDeliveryTracker newTracker(PersistentDispatcherMultipleConsumers d */ public CompletableFuture cleanResidualSnapshots(ManagedCursor cursor) { Map cursorProperties = 
cursor.getCursorProperties(); + if (MapUtils.isEmpty(cursorProperties)) { + return CompletableFuture.completedFuture(null); + } List> futures = new ArrayList<>(); FutureUtil.Sequencer sequencer = FutureUtil.Sequencer.create(); cursorProperties.forEach((k, v) -> { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java index 67a7de1f01339..f98c9e000f150 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/bucket/BucketDelayedDeliveryTracker.java @@ -50,6 +50,7 @@ import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.collections4.MapUtils; import org.apache.commons.lang3.mutable.MutableLong; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.delayed.AbstractDelayedDeliveryTracker; @@ -137,9 +138,15 @@ public BucketDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispat private synchronized long recoverBucketSnapshot() throws RuntimeException { ManagedCursor cursor = this.lastMutableBucket.getCursor(); + Map cursorProperties = cursor.getCursorProperties(); + if (MapUtils.isEmpty(cursorProperties)) { + log.info("[{}] Recover delayed message index bucket snapshot finish, don't find bucket snapshot", + dispatcher.getName()); + return 0; + } FutureUtil.Sequencer sequencer = this.lastMutableBucket.getSequencer(); Map, ImmutableBucket> toBeDeletedBucketMap = new HashMap<>(); - cursor.getCursorProperties().keySet().forEach(key -> { + cursorProperties.keySet().forEach(key -> { if (key.startsWith(DELAYED_BUCKET_KEY_PREFIX)) { String[] keys = key.split(DELIMITER); checkArgument(keys.length == 3); diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java index cef3f0eb609a1..30d1874a97299 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptors.java @@ -59,6 +59,9 @@ public BrokerInterceptors(Map intercep * @return the collection of broker event interceptor */ public static BrokerInterceptor load(ServiceConfiguration conf) throws IOException { + if (conf.getBrokerInterceptors().isEmpty()) { + return null; + } BrokerInterceptorDefinitions definitions = BrokerInterceptorUtils.searchForInterceptors(conf.getBrokerInterceptorsDirectory(), conf.getNarExtractionDirectory()); @@ -87,7 +90,7 @@ public static BrokerInterceptor load(ServiceConfiguration conf) throws IOExcepti }); Map interceptors = builder.build(); - if (interceptors != null && !interceptors.isEmpty()) { + if (!interceptors.isEmpty()) { return new BrokerInterceptors(interceptors); } else { return null; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java index acd34e151ed2a..d7c21de5ea1fa 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderBroker.java @@ -30,5 +30,23 @@ @AllArgsConstructor @NoArgsConstructor public class LeaderBroker { + private String brokerId; private String serviceUrl; + + public String getBrokerId() { + if (brokerId != null) { + return brokerId; + } else { + // for backward compatibility at runtime with older versions of Pulsar + return parseHostAndPort(serviceUrl); + } + } + + private static String parseHostAndPort(String serviceUrl) { + int uriSeparatorPos = serviceUrl.indexOf("://"); + if 
(uriSeparatorPos == -1) { + throw new IllegalArgumentException("'" + serviceUrl + "' isn't an URI."); + } + return serviceUrl.substring(uriSeparatorPos + 3); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java index 05fe4353f3e76..2e53b54e98f61 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LeaderElectionService.java @@ -35,17 +35,17 @@ public class LeaderElectionService implements AutoCloseable { private final LeaderElection leaderElection; private final LeaderBroker localValue; - public LeaderElectionService(CoordinationService cs, String localWebServiceAddress, - Consumer listener) { - this(cs, localWebServiceAddress, ELECTION_ROOT, listener); + public LeaderElectionService(CoordinationService cs, String brokerId, + String serviceUrl, Consumer listener) { + this(cs, brokerId, serviceUrl, ELECTION_ROOT, listener); } public LeaderElectionService(CoordinationService cs, - String localWebServiceAddress, - String electionRoot, + String brokerId, + String serviceUrl, String electionRoot, Consumer listener) { this.leaderElection = cs.getLeaderElection(LeaderBroker.class, electionRoot, listener); - this.localValue = new LeaderBroker(localWebServiceAddress); + this.localValue = new LeaderBroker(brokerId, serviceUrl); } public void start() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java index 17aa7170fc63c..61f34ef4901ba 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtils.java @@ -161,7 +161,8 @@ public static long 
getCpuUsageForCGroup() { * *

* Line is split in "words", filtering the first. The sum of all numbers give the amount of cpu cycles used this - * far. Real CPU usage should equal the sum subtracting the idle cycles, this would include iowait, irq and steal. + * far. Real CPU usage should equal the sum substracting the idle cycles(that is idle+iowait), this would include + * cpu, user, nice, system, irq, softirq, steal, guest and guest_nice. */ public static ResourceUsage getCpuUsageForEntireHost() { try (Stream stream = Files.lines(Paths.get(PROC_STAT_PATH))) { @@ -175,7 +176,7 @@ public static ResourceUsage getCpuUsageForEntireHost() { .filter(s -> !s.contains("cpu")) .mapToLong(Long::parseLong) .sum(); - long idle = Long.parseLong(words[4]); + long idle = Long.parseLong(words[4]) + Long.parseLong(words[5]); return ResourceUsage.builder() .usage(total - idle) .idle(idle) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java index a632a47f05116..c1fe2a4930c34 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java @@ -64,7 +64,7 @@ public Map getBundleData() { public Map getBundleDataForLoadShedding() { return bundleData.entrySet().stream() - .filter(e -> !NamespaceService.filterNamespaceForShedding( + .filter(e -> !NamespaceService.isSLAOrHeartbeatNamespace( NamespaceBundle.getBundleNamespace(e.getKey()))) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java index 0de2ae92db61a..f9f36b705d4c4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/NoopLoadManager.java @@ -43,7 +43,7 @@ public class NoopLoadManager implements LoadManager { private PulsarService pulsar; - private String lookupServiceAddress; + private String brokerId; private ResourceUnit localResourceUnit; private LockManager lockManager; private Map bundleBrokerAffinityMap; @@ -57,16 +57,15 @@ public void initialize(PulsarService pulsar) { @Override public void start() throws PulsarServerException { - lookupServiceAddress = pulsar.getLookupServiceAddress(); - localResourceUnit = new SimpleResourceUnit(String.format("http://%s", lookupServiceAddress), - new PulsarResourceDescription()); + brokerId = pulsar.getBrokerId(); + localResourceUnit = new SimpleResourceUnit(brokerId, new PulsarResourceDescription()); - LocalBrokerData localData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), + LocalBrokerData localData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners()); localData.setProtocols(pulsar.getProtocolDataToAdvertise()); localData.setLoadManagerClassName(this.pulsar.getConfig().getLoadManagerClassName()); - String brokerReportPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; + String brokerReportPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + brokerId; try { log.info("Acquiring broker resource lock on {}", brokerReportPath); @@ -129,12 +128,12 @@ public void disableBroker() throws Exception { @Override public Set getAvailableBrokers() throws Exception { - return Collections.singleton(lookupServiceAddress); + return Collections.singleton(brokerId); } @Override public CompletableFuture> getAvailableBrokersAsync() { - return CompletableFuture.completedFuture(Collections.singleton(lookupServiceAddress)); + return CompletableFuture.completedFuture(Collections.singleton(brokerId)); } @Override @@ -153,7 +152,6 @@ 
public String setNamespaceBundleAffinity(String bundle, String broker) { if (StringUtils.isBlank(broker)) { return this.bundleBrokerAffinityMap.remove(bundle); } - broker = broker.replaceFirst("http[s]?://", ""); return this.bundleBrokerAffinityMap.put(bundle, broker); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java index ef4dd2a97b280..c28a8be4c0d3a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java @@ -23,8 +23,6 @@ */ public interface ResourceUnit extends Comparable { - String PROPERTY_KEY_BROKER_ZNODE_NAME = "__advertised_addr"; - String getResourceId(); ResourceDescription getAvailableResource(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java index 921ce35b5c65e..18e30ddf922d0 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryImpl.java @@ -82,9 +82,9 @@ public BrokerRegistryImpl(PulsarService pulsar) { this.brokerLookupDataLockManager = pulsar.getCoordinationService().getLockManager(BrokerLookupData.class); this.scheduler = pulsar.getLoadManagerExecutor(); this.listeners = new ArrayList<>(); - this.brokerId = pulsar.getLookupServiceAddress(); + this.brokerId = pulsar.getBrokerId(); this.brokerLookupData = new BrokerLookupData( - pulsar.getSafeWebServiceAddress(), + pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java index cba499eb8eedb..6a0e677c66268 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImpl.java @@ -21,6 +21,7 @@ import static java.lang.String.format; import static org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl.Role.Follower; import static org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl.Role.Leader; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl.TOPIC; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Label.Success; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.Admin; import static org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared.getNamespaceBundle; @@ -44,6 +45,7 @@ import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.mutable.MutableObject; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; @@ -86,6 +88,7 @@ import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceAllocationPolicies; import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; +import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceBundleSplitAlgorithm; @@ -95,7 +98,6 @@ import 
org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.common.util.FutureUtil; -import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.metadata.api.coordination.LeaderElectionState; import org.slf4j.Logger; @@ -116,6 +118,8 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { private static final long MONITOR_INTERVAL_IN_MILLIS = 120_000; + public static final long COMPACTION_THRESHOLD = 5 * 1024 * 1024; + private static final String ELECTION_ROOT = "/loadbalance/extension/leader"; private PulsarService pulsar; @@ -152,6 +156,7 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { @Getter private final List brokerFilterPipeline; + /** * The load data reporter. */ @@ -171,6 +176,8 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { private volatile boolean started = false; + private boolean configuredSystemTopics = false; + private final AssignCounter assignCounter = new AssignCounter(); @Getter private final UnloadCounter unloadCounter = new UnloadCounter(); @@ -181,10 +188,8 @@ public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager { // record split metrics private final AtomicReference> splitMetrics = new AtomicReference<>(); - private final ConcurrentOpenHashMap>> - lookupRequests = ConcurrentOpenHashMap.>>newBuilder() - .build(); + private final ConcurrentHashMap>> + lookupRequests = new ConcurrentHashMap<>(); private final CountDownLatch initWaiter = new CountDownLatch(1); /** @@ -197,7 +202,7 @@ public Set getOwnedServiceUnits() { } Set> entrySet = serviceUnitStateChannel.getOwnershipEntrySet(); String brokerId = brokerRegistry.getBrokerId(); - return entrySet.stream() + Set ownedServiceUnits = entrySet.stream() .filter(entry -> { var stateData = entry.getValue(); return stateData.state() == ServiceUnitState.Owned @@ -207,6 +212,36 @@ public Set getOwnedServiceUnits() { 
var bundle = entry.getKey(); return getNamespaceBundle(pulsar, bundle); }).collect(Collectors.toSet()); + // Add heartbeat and SLA monitor namespace bundle. + NamespaceName heartbeatNamespace = NamespaceService.getHeartbeatNamespace(brokerId, pulsar.getConfiguration()); + try { + NamespaceBundle fullBundle = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(heartbeatNamespace); + ownedServiceUnits.add(fullBundle); + } catch (Exception e) { + log.warn("Failed to get heartbeat namespace bundle.", e); + } + NamespaceName heartbeatNamespaceV2 = NamespaceService + .getHeartbeatNamespaceV2(brokerId, pulsar.getConfiguration()); + try { + NamespaceBundle fullBundle = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(heartbeatNamespaceV2); + ownedServiceUnits.add(fullBundle); + } catch (Exception e) { + log.warn("Failed to get heartbeat namespace V2 bundle.", e); + } + + NamespaceName slaMonitorNamespace = NamespaceService + .getSLAMonitorNamespace(brokerId, pulsar.getConfiguration()); + try { + NamespaceBundle fullBundle = pulsar.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(slaMonitorNamespace); + ownedServiceUnits.add(fullBundle); + } catch (Exception e) { + log.warn("Failed to get SLA Monitor namespace bundle.", e); + } + + return ownedServiceUnits; } public enum Role { @@ -232,6 +267,10 @@ public static boolean isLoadManagerExtensionEnabled(ServiceConfiguration conf) { return ExtensibleLoadManagerImpl.class.getName().equals(conf.getLoadManagerClassName()); } + public static boolean isLoadManagerExtensionEnabled(PulsarService pulsar) { + return pulsar.getLoadManager().get() instanceof ExtensibleLoadManagerWrapper; + } + public static ExtensibleLoadManagerImpl get(LoadManager loadManager) { if (!(loadManager instanceof ExtensibleLoadManagerWrapper loadManagerWrapper)) { throw new IllegalArgumentException("The load manager should be 'ExtensibleLoadManagerWrapper'."); @@ -249,114 +288,145 @@ public static 
void createSystemTopic(PulsarService pulsar, String topic) throws log.info("Created topic {}.", topic); } catch (PulsarAdminException.ConflictException ex) { if (debug(pulsar.getConfiguration(), log)) { - log.info("Topic {} already exists.", topic, ex); + log.info("Topic {} already exists.", topic); } } catch (PulsarAdminException e) { throw new PulsarServerException(e); } } + private static void createSystemTopics(PulsarService pulsar) throws PulsarServerException { + createSystemTopic(pulsar, BROKER_LOAD_DATA_STORE_TOPIC); + createSystemTopic(pulsar, TOP_BUNDLES_LOAD_DATA_STORE_TOPIC); + } + + private static boolean configureSystemTopics(PulsarService pulsar) { + try { + if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(pulsar) + && (pulsar.getConfiguration().isSystemTopicEnabled() + && pulsar.getConfiguration().isTopicLevelPoliciesEnabled())) { + Long threshold = pulsar.getAdminClient().topicPolicies().getCompactionThreshold(TOPIC); + if (threshold == null || COMPACTION_THRESHOLD != threshold.longValue()) { + pulsar.getAdminClient().topicPolicies().setCompactionThreshold(TOPIC, COMPACTION_THRESHOLD); + log.info("Set compaction threshold: {} bytes for system topic {}.", COMPACTION_THRESHOLD, TOPIC); + } + } else { + log.warn("System topic or topic level policies is disabled. 
" + + "{} compaction threshold follows the broker or namespace policies.", TOPIC); + } + return true; + } catch (Exception e) { + log.error("Failed to set compaction threshold for system topic:{}", TOPIC, e); + } + return false; + } + @Override public void start() throws PulsarServerException { if (this.started) { return; } - this.brokerRegistry = new BrokerRegistryImpl(pulsar); - this.leaderElectionService = new LeaderElectionService( - pulsar.getCoordinationService(), pulsar.getSafeWebServiceAddress(), ELECTION_ROOT, - state -> { - pulsar.getLoadManagerExecutor().execute(() -> { - if (state == LeaderElectionState.Leading) { - playLeader(); - } else { - playFollower(); - } + try { + this.brokerRegistry = new BrokerRegistryImpl(pulsar); + this.leaderElectionService = new LeaderElectionService( + pulsar.getCoordinationService(), pulsar.getBrokerId(), + pulsar.getSafeWebServiceAddress(), ELECTION_ROOT, + state -> { + pulsar.getLoadManagerExecutor().execute(() -> { + if (state == LeaderElectionState.Leading) { + playLeader(); + } else { + playFollower(); + } + }); }); - }); - this.serviceUnitStateChannel = new ServiceUnitStateChannelImpl(pulsar); - this.brokerRegistry.start(); - this.splitManager = new SplitManager(splitCounter); - this.unloadManager = new UnloadManager(unloadCounter); - this.serviceUnitStateChannel.listen(unloadManager); - this.serviceUnitStateChannel.listen(splitManager); - this.leaderElectionService.start(); - this.serviceUnitStateChannel.start(); - this.antiAffinityGroupPolicyHelper = - new AntiAffinityGroupPolicyHelper(pulsar, serviceUnitStateChannel); - antiAffinityGroupPolicyHelper.listenFailureDomainUpdate(); - this.antiAffinityGroupPolicyFilter = new AntiAffinityGroupPolicyFilter(antiAffinityGroupPolicyHelper); - this.brokerFilterPipeline.add(antiAffinityGroupPolicyFilter); - SimpleResourceAllocationPolicies policies = new SimpleResourceAllocationPolicies(pulsar); - this.isolationPoliciesHelper = new IsolationPoliciesHelper(policies); - 
this.brokerFilterPipeline.add(new BrokerIsolationPoliciesFilter(isolationPoliciesHelper)); + this.serviceUnitStateChannel = ServiceUnitStateChannelImpl.newInstance(pulsar); + this.brokerRegistry.start(); + this.splitManager = new SplitManager(splitCounter); + this.unloadManager = new UnloadManager(unloadCounter); + this.serviceUnitStateChannel.listen(unloadManager); + this.serviceUnitStateChannel.listen(splitManager); + this.leaderElectionService.start(); + this.serviceUnitStateChannel.start(); + this.antiAffinityGroupPolicyHelper = + new AntiAffinityGroupPolicyHelper(pulsar, serviceUnitStateChannel); + antiAffinityGroupPolicyHelper.listenFailureDomainUpdate(); + this.antiAffinityGroupPolicyFilter = new AntiAffinityGroupPolicyFilter(antiAffinityGroupPolicyHelper); + this.brokerFilterPipeline.add(antiAffinityGroupPolicyFilter); + SimpleResourceAllocationPolicies policies = new SimpleResourceAllocationPolicies(pulsar); + this.isolationPoliciesHelper = new IsolationPoliciesHelper(policies); + this.brokerFilterPipeline.add(new BrokerIsolationPoliciesFilter(isolationPoliciesHelper)); - createSystemTopic(pulsar, BROKER_LOAD_DATA_STORE_TOPIC); - createSystemTopic(pulsar, TOP_BUNDLES_LOAD_DATA_STORE_TOPIC); + try { + this.brokerLoadDataStore = LoadDataStoreFactory + .create(pulsar, BROKER_LOAD_DATA_STORE_TOPIC, BrokerLoadData.class); + this.topBundlesLoadDataStore = LoadDataStoreFactory + .create(pulsar, TOP_BUNDLES_LOAD_DATA_STORE_TOPIC, TopBundlesLoadData.class); + } catch (LoadDataStoreException e) { + throw new PulsarServerException(e); + } - try { - this.brokerLoadDataStore = LoadDataStoreFactory - .create(pulsar.getClient(), BROKER_LOAD_DATA_STORE_TOPIC, BrokerLoadData.class); - this.brokerLoadDataStore.startTableView(); - this.topBundlesLoadDataStore = LoadDataStoreFactory - .create(pulsar.getClient(), TOP_BUNDLES_LOAD_DATA_STORE_TOPIC, TopBundlesLoadData.class); - } catch (LoadDataStoreException e) { - throw new PulsarServerException(e); + this.context = 
LoadManagerContextImpl.builder() + .configuration(conf) + .brokerRegistry(brokerRegistry) + .brokerLoadDataStore(brokerLoadDataStore) + .topBundleLoadDataStore(topBundlesLoadDataStore).build(); + + this.brokerLoadDataReporter = + new BrokerLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), brokerLoadDataStore); + + this.topBundleLoadDataReporter = + new TopBundleLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), topBundlesLoadDataStore); + this.serviceUnitStateChannel.listen(brokerLoadDataReporter); + this.serviceUnitStateChannel.listen(topBundleLoadDataReporter); + var interval = conf.getLoadBalancerReportUpdateMinIntervalMillis(); + this.brokerLoadDataReportTask = this.pulsar.getLoadManagerExecutor() + .scheduleAtFixedRate(() -> { + try { + brokerLoadDataReporter.reportAsync(false); + // TODO: update broker load metrics using getLocalData + } catch (Throwable e) { + log.error("Failed to run the broker load manager executor job.", e); + } + }, + interval, + interval, TimeUnit.MILLISECONDS); + + this.topBundlesLoadDataReportTask = this.pulsar.getLoadManagerExecutor() + .scheduleAtFixedRate(() -> { + try { + // TODO: consider excluding the bundles that are in the process of split. 
+ topBundleLoadDataReporter.reportAsync(false); + } catch (Throwable e) { + log.error("Failed to run the top bundles load manager executor job.", e); + } + }, + interval, + interval, TimeUnit.MILLISECONDS); + + this.monitorTask = this.pulsar.getLoadManagerExecutor() + .scheduleAtFixedRate(() -> { + monitor(); + }, + MONITOR_INTERVAL_IN_MILLIS, + MONITOR_INTERVAL_IN_MILLIS, TimeUnit.MILLISECONDS); + + this.unloadScheduler = new UnloadScheduler( + pulsar, pulsar.getLoadManagerExecutor(), unloadManager, context, + serviceUnitStateChannel, unloadCounter, unloadMetrics); + this.splitScheduler = new SplitScheduler( + pulsar, serviceUnitStateChannel, splitManager, splitCounter, splitMetrics, context); + this.splitScheduler.start(); + this.initWaiter.countDown(); + this.started = true; + log.info("Started load manager."); + } catch (Exception ex) { + log.error("Failed to start the extensible load balance and close broker registry {}.", + this.brokerRegistry, ex); + if (this.brokerRegistry != null) { + brokerRegistry.close(); + } } - - this.context = LoadManagerContextImpl.builder() - .configuration(conf) - .brokerRegistry(brokerRegistry) - .brokerLoadDataStore(brokerLoadDataStore) - .topBundleLoadDataStore(topBundlesLoadDataStore).build(); - - this.brokerLoadDataReporter = - new BrokerLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), brokerLoadDataStore); - - this.topBundleLoadDataReporter = - new TopBundleLoadDataReporter(pulsar, brokerRegistry.getBrokerId(), topBundlesLoadDataStore); - this.serviceUnitStateChannel.listen(brokerLoadDataReporter); - this.serviceUnitStateChannel.listen(topBundleLoadDataReporter); - var interval = conf.getLoadBalancerReportUpdateMinIntervalMillis(); - this.brokerLoadDataReportTask = this.pulsar.getLoadManagerExecutor() - .scheduleAtFixedRate(() -> { - try { - brokerLoadDataReporter.reportAsync(false); - // TODO: update broker load metrics using getLocalData - } catch (Throwable e) { - log.error("Failed to run the broker load manager 
executor job.", e); - } - }, - interval, - interval, TimeUnit.MILLISECONDS); - - this.topBundlesLoadDataReportTask = this.pulsar.getLoadManagerExecutor() - .scheduleAtFixedRate(() -> { - try { - // TODO: consider excluding the bundles that are in the process of split. - topBundleLoadDataReporter.reportAsync(false); - } catch (Throwable e) { - log.error("Failed to run the top bundles load manager executor job.", e); - } - }, - interval, - interval, TimeUnit.MILLISECONDS); - - this.monitorTask = this.pulsar.getLoadManagerExecutor() - .scheduleAtFixedRate(() -> { - monitor(); - }, - MONITOR_INTERVAL_IN_MILLIS, - MONITOR_INTERVAL_IN_MILLIS, TimeUnit.MILLISECONDS); - - this.unloadScheduler = new UnloadScheduler( - pulsar, pulsar.getLoadManagerExecutor(), unloadManager, context, - serviceUnitStateChannel, unloadCounter, unloadMetrics); - this.unloadScheduler.start(); - this.splitScheduler = new SplitScheduler( - pulsar, serviceUnitStateChannel, splitManager, splitCounter, splitMetrics, context); - this.splitScheduler.start(); - this.initWaiter.countDown(); - this.started = true; } @Override @@ -377,25 +447,38 @@ public CompletableFuture> assign(Optional getOwnerAsync( - ServiceUnitId serviceUnit, String bundle, boolean ownByLocalBrokerIfAbsent) { + private String getHeartbeatOrSLAMonitorBrokerId(ServiceUnitId serviceUnit) { + // Check if this is Heartbeat or SLAMonitor namespace + String candidateBroker = NamespaceService.checkHeartbeatNamespace(serviceUnit); + if (candidateBroker == null) { + candidateBroker = NamespaceService.checkHeartbeatNamespaceV2(serviceUnit); + } + if (candidateBroker == null) { + candidateBroker = NamespaceService.getSLAMonitorBrokerName(serviceUnit); + } + if (candidateBroker != null) { + return candidateBroker.substring(candidateBroker.lastIndexOf('/') + 1); + } + return candidateBroker; + } + + private CompletableFuture getOrSelectOwnerAsync(ServiceUnitId serviceUnit, + String bundle) { return 
serviceUnitStateChannel.getOwnerAsync(bundle).thenCompose(broker -> { // If the bundle not assign yet, select and publish assign event to channel. if (broker.isEmpty()) { - CompletableFuture> selectedBroker; - if (ownByLocalBrokerIfAbsent) { - String brokerId = this.brokerRegistry.getBrokerId(); - selectedBroker = CompletableFuture.completedFuture(Optional.of(brokerId)); - } else { - selectedBroker = this.selectAsync(serviceUnit); - } - return selectedBroker.thenCompose(brokerOpt -> { + return this.selectAsync(serviceUnit).thenCompose(brokerOpt -> { if (brokerOpt.isPresent()) { assignCounter.incrementSuccess(); log.info("Selected new owner broker: {} for bundle: {}.", brokerOpt.get(), bundle); @@ -425,7 +508,8 @@ private CompletableFuture> getBrokerLookupData( }).thenCompose(broker -> this.getBrokerRegistry().lookupAsync(broker).thenCompose(brokerLookupData -> { if (brokerLookupData.isEmpty()) { String errorMsg = String.format( - "Failed to look up a broker registry:%s for bundle:%s", broker, bundle); + "Failed to lookup broker:%s for bundle:%s, the broker has not been registered.", + broker, bundle); log.error(errorMsg); throw new IllegalStateException(errorMsg); } @@ -443,30 +527,37 @@ private CompletableFuture> getBrokerLookupData( public CompletableFuture tryAcquiringOwnership(NamespaceBundle namespaceBundle) { log.info("Try acquiring ownership for bundle: {} - {}.", namespaceBundle, brokerRegistry.getBrokerId()); final String bundle = namespaceBundle.toString(); - return dedupeLookupRequest(bundle, k -> { - final CompletableFuture owner = - this.getOwnerAsync(namespaceBundle, bundle, true); - return getBrokerLookupData(owner.thenApply(Optional::ofNullable), bundle); - }).thenApply(brokerLookupData -> { - if (brokerLookupData.isEmpty()) { - throw new IllegalStateException( - "Failed to get the broker lookup data for bundle: " + bundle); - } - return brokerLookupData.get().toNamespaceEphemeralData(); - }); + return assign(Optional.empty(), namespaceBundle) + 
.thenApply(brokerLookupData -> { + if (brokerLookupData.isEmpty()) { + String errorMsg = String.format( + "Failed to get the broker lookup data for bundle:%s", bundle); + log.error(errorMsg); + throw new IllegalStateException(errorMsg); + } + return brokerLookupData.get().toNamespaceEphemeralData(); + }); } private CompletableFuture> dedupeLookupRequest( String key, Function>> provider) { - CompletableFuture> future = lookupRequests.computeIfAbsent(key, provider); - future.whenComplete((r, t) -> { - if (t != null) { + final MutableObject>> newFutureCreated = new MutableObject<>(); + try { + return lookupRequests.computeIfAbsent(key, k -> { + CompletableFuture> future = provider.apply(k); + newFutureCreated.setValue(future); + return future; + }); + } finally { + if (newFutureCreated.getValue() != null) { + newFutureCreated.getValue().whenComplete((v, ex) -> { + if (ex != null) { assignCounter.incrementFailure(); } lookupRequests.remove(key); - } - ); - return future; + }); + } + } } public CompletableFuture> selectAsync(ServiceUnitId bundle) { @@ -521,15 +612,16 @@ public CompletableFuture checkOwnershipAsync(Optional to } public CompletableFuture> getOwnershipAsync(Optional topic, - ServiceUnitId bundleUnit) { - final String bundle = bundleUnit.toString(); - CompletableFuture> owner; + ServiceUnitId serviceUnit) { + final String bundle = serviceUnit.toString(); if (topic.isPresent() && isInternalTopic(topic.get().toString())) { - owner = serviceUnitStateChannel.getChannelOwnerAsync(); - } else { - owner = serviceUnitStateChannel.getOwnerAsync(bundle); + return serviceUnitStateChannel.getChannelOwnerAsync(); + } + String candidateBroker = getHeartbeatOrSLAMonitorBrokerId(serviceUnit); + if (candidateBroker != null) { + return CompletableFuture.completedFuture(Optional.of(candidateBroker)); } - return owner; + return serviceUnitStateChannel.getOwnerAsync(bundle); } public CompletableFuture> getOwnershipWithLookupDataAsync(ServiceUnitId bundleUnit) { @@ -543,6 
+635,10 @@ public CompletableFuture> getOwnershipWithLookupDataA public CompletableFuture unloadNamespaceBundleAsync(ServiceUnitId bundle, Optional destinationBroker) { + if (NamespaceService.isSLAOrHeartbeatNamespace(bundle.getNamespaceObject().toString())) { + log.info("Skip unloading namespace bundle: {}.", bundle); + return CompletableFuture.completedFuture(null); + } return getOwnershipAsync(Optional.empty(), bundle) .thenCompose(brokerOpt -> { if (brokerOpt.isEmpty()) { @@ -577,6 +673,10 @@ private CompletableFuture unloadAsync(UnloadDecision unloadDecision, public CompletableFuture splitNamespaceBundleAsync(ServiceUnitId bundle, NamespaceBundleSplitAlgorithm splitAlgorithm, List boundaries) { + if (NamespaceService.isSLAOrHeartbeatNamespace(bundle.getNamespaceObject().toString())) { + log.info("Skip split namespace bundle: {}.", bundle); + return CompletableFuture.completedFuture(null); + } final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundle.toString()); final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle.toString()); NamespaceBundle namespaceBundle = @@ -666,82 +766,106 @@ public void close() throws PulsarServerException { } } - private boolean isInternalTopic(String topic) { - return topic.startsWith(ServiceUnitStateChannelImpl.TOPIC) + public static boolean isInternalTopic(String topic) { + return topic.startsWith(TOPIC) || topic.startsWith(BROKER_LOAD_DATA_STORE_TOPIC) || topic.startsWith(TOP_BUNDLES_LOAD_DATA_STORE_TOPIC); } @VisibleForTesting - void playLeader() { - if (role != Leader) { - log.info("This broker:{} is changing the role from {} to {}", - pulsar.getLookupServiceAddress(), role, Leader); - int retry = 0; - while (true) { - try { - initWaiter.await(); - serviceUnitStateChannel.scheduleOwnershipMonitor(); - topBundlesLoadDataStore.startTableView(); - unloadScheduler.start(); + synchronized void playLeader() { + log.info("This broker:{} is setting the role from {} to {}", + 
pulsar.getBrokerId(), role, Leader); + int retry = 0; + boolean becameFollower = false; + while (!Thread.currentThread().isInterrupted()) { + try { + if (!serviceUnitStateChannel.isChannelOwner()) { + becameFollower = true; break; - } catch (Throwable e) { - log.error("The broker:{} failed to change the role. Retrying {} th ...", - pulsar.getLookupServiceAddress(), ++retry, e); - try { - Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); - } catch (InterruptedException ex) { - log.warn("Interrupted while sleeping."); - } + } + initWaiter.await(); + // Confirm the system topics have been created or create them if they do not exist. + // If the leader has changed, the new leader need to reset + // the local brokerService.topics (by this topic creations). + // Otherwise, the system topic existence check will fail on the leader broker. + createSystemTopics(pulsar); + brokerLoadDataStore.init(); + topBundlesLoadDataStore.init(); + unloadScheduler.start(); + serviceUnitStateChannel.scheduleOwnershipMonitor(); + break; + } catch (Throwable e) { + log.warn("The broker:{} failed to set the role. Retrying {} th ...", + pulsar.getBrokerId(), ++retry, e); + try { + Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); + } catch (InterruptedException ex) { + log.warn("Interrupted while sleeping."); + // preserve thread's interrupt status + Thread.currentThread().interrupt(); } } - role = Leader; - log.info("This broker:{} plays the leader now.", pulsar.getLookupServiceAddress()); } - // flush the load data when the leader is elected. 
- if (brokerLoadDataReporter != null) { - brokerLoadDataReporter.reportAsync(true); - } - if (topBundleLoadDataReporter != null) { - topBundleLoadDataReporter.reportAsync(true); + if (becameFollower) { + log.warn("The broker:{} became follower while initializing leader role.", pulsar.getBrokerId()); + playFollower(); + return; } + + role = Leader; + log.info("This broker:{} plays the leader now.", pulsar.getBrokerId()); + + // flush the load data when the leader is elected. + brokerLoadDataReporter.reportAsync(true); + topBundleLoadDataReporter.reportAsync(true); } @VisibleForTesting - void playFollower() { - if (role != Follower) { - log.info("This broker:{} is changing the role from {} to {}", - pulsar.getLookupServiceAddress(), role, Follower); - int retry = 0; - while (true) { - try { - initWaiter.await(); - serviceUnitStateChannel.cancelOwnershipMonitor(); - topBundlesLoadDataStore.closeTableView(); - unloadScheduler.close(); + synchronized void playFollower() { + log.info("This broker:{} is setting the role from {} to {}", + pulsar.getBrokerId(), role, Follower); + int retry = 0; + boolean becameLeader = false; + while (!Thread.currentThread().isInterrupted()) { + try { + if (serviceUnitStateChannel.isChannelOwner()) { + becameLeader = true; break; - } catch (Throwable e) { - log.error("The broker:{} failed to change the role. Retrying {} th ...", - pulsar.getLookupServiceAddress(), ++retry, e); - try { - Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); - } catch (InterruptedException ex) { - log.warn("Interrupted while sleeping."); - } + } + initWaiter.await(); + unloadScheduler.close(); + serviceUnitStateChannel.cancelOwnershipMonitor(); + brokerLoadDataStore.init(); + topBundlesLoadDataStore.close(); + topBundlesLoadDataStore.startProducer(); + break; + } catch (Throwable e) { + log.warn("The broker:{} failed to set the role. 
Retrying {} th ...", + pulsar.getBrokerId(), ++retry, e); + try { + Thread.sleep(Math.min(retry * 10, MAX_ROLE_CHANGE_RETRY_DELAY_IN_MILLIS)); + } catch (InterruptedException ex) { + log.warn("Interrupted while sleeping."); + // preserve thread's interrupt status + Thread.currentThread().interrupt(); } } - role = Follower; - log.info("This broker:{} plays a follower now.", pulsar.getLookupServiceAddress()); } - // flush the load data when the leader is elected. - if (brokerLoadDataReporter != null) { - brokerLoadDataReporter.reportAsync(true); - } - if (topBundleLoadDataReporter != null) { - topBundleLoadDataReporter.reportAsync(true); + if (becameLeader) { + log.warn("This broker:{} became leader while initializing follower role.", pulsar.getBrokerId()); + playLeader(); + return; } + + role = Follower; + log.info("This broker:{} plays a follower now.", pulsar.getBrokerId()); + + // flush the load data when the leader is elected. + brokerLoadDataReporter.reportAsync(true); + topBundleLoadDataReporter.reportAsync(true); } public List getMetrics() { @@ -765,7 +889,9 @@ public List getMetrics() { return metricsCollection; } - private void monitor() { + + @VisibleForTesting + protected void monitor() { try { initWaiter.await(); @@ -773,6 +899,11 @@ private void monitor() { // Periodically check the role in case ZK watcher fails. var isChannelOwner = serviceUnitStateChannel.isChannelOwner(); if (isChannelOwner) { + // System topic config might fail due to the race condition + // with topic policy init(Topic policies cache have not init). + if (!configuredSystemTopics) { + configuredSystemTopics = configureSystemTopics(pulsar); + } if (role != Leader) { log.warn("Current role:{} does not match with the channel ownership:{}. 
" + "Playing the leader role.", role, isChannelOwner); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java index 18e949537dedb..cd1561cb70e2d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerWrapper.java @@ -118,13 +118,11 @@ public void setLoadReportForceUpdateFlag() { @Override public void writeLoadReportOnZookeeper() throws Exception { // No-op, this operation is not useful, the load data reporter will automatically write. - throw new UnsupportedOperationException(); } @Override public void writeResourceQuotasToZooKeeper() throws Exception { // No-op, this operation is not useful, the load data reporter will automatically write. - throw new UnsupportedOperationException(); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java index 717ff484fe772..1471d4a75c175 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelImpl.java @@ -54,6 +54,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ScheduledFuture; @@ -67,10 +68,12 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import 
org.apache.commons.lang3.mutable.MutableInt; +import org.apache.commons.lang3.mutable.MutableObject; import org.apache.pulsar.PulsarClusterMetadataSetup; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.loadbalance.LeaderBroker; import org.apache.pulsar.broker.loadbalance.LeaderElectionService; import org.apache.pulsar.broker.loadbalance.extensions.BrokerRegistry; import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; @@ -97,7 +100,6 @@ import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.common.topics.TopicCompactionStrategy; import org.apache.pulsar.common.util.FutureUtil; -import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.extended.SessionEvent; @@ -125,9 +127,9 @@ public class ServiceUnitStateChannelImpl implements ServiceUnitStateChannel { private final PulsarService pulsar; private final ServiceConfiguration config; private final Schema schema; - private final ConcurrentOpenHashMap> getOwnerRequests; - private final String lookupServiceAddress; - private final ConcurrentOpenHashMap> cleanupJobs; + private final Map> getOwnerRequests; + private final String brokerId; + private final Map> cleanupJobs; private final StateChangeListeners stateChangeListeners; private ExtensibleLoadManagerImpl loadManager; private BrokerRegistry brokerRegistry; @@ -199,19 +201,29 @@ enum MetadataState { Unstable } + public static ServiceUnitStateChannelImpl newInstance(PulsarService pulsar) { + return new ServiceUnitStateChannelImpl(pulsar); + } + public ServiceUnitStateChannelImpl(PulsarService pulsar) { + this(pulsar, MAX_IN_FLIGHT_STATE_WAITING_TIME_IN_MILLIS, OWNERSHIP_MONITOR_DELAY_TIME_IN_SECS); + } + + 
@VisibleForTesting + public ServiceUnitStateChannelImpl(PulsarService pulsar, + long inFlightStateWaitingTimeInMillis, + long ownershipMonitorDelayTimeInSecs) { this.pulsar = pulsar; this.config = pulsar.getConfig(); - this.lookupServiceAddress = pulsar.getLookupServiceAddress(); + this.brokerId = pulsar.getBrokerId(); this.schema = Schema.JSON(ServiceUnitStateData.class); - this.getOwnerRequests = ConcurrentOpenHashMap.>newBuilder().build(); - this.cleanupJobs = ConcurrentOpenHashMap.>newBuilder().build(); + this.getOwnerRequests = new ConcurrentHashMap<>(); + this.cleanupJobs = new ConcurrentHashMap<>(); this.stateChangeListeners = new StateChangeListeners(); this.semiTerminalStateWaitingTimeInMillis = config.getLoadBalancerServiceUnitStateTombstoneDelayTimeInSeconds() * 1000; - this.inFlightStateWaitingTimeInMillis = MAX_IN_FLIGHT_STATE_WAITING_TIME_IN_MILLIS; - this.ownershipMonitorDelayTimeInSecs = OWNERSHIP_MONITOR_DELAY_TIME_IN_SECS; + this.inFlightStateWaitingTimeInMillis = inFlightStateWaitingTimeInMillis; + this.ownershipMonitorDelayTimeInSecs = ownershipMonitorDelayTimeInSecs; if (semiTerminalStateWaitingTimeInMillis < inFlightStateWaitingTimeInMillis) { throw new IllegalArgumentException( "Invalid Config: loadBalancerServiceUnitStateCleanUpDelayTimeInSeconds < " @@ -249,7 +261,7 @@ public void scheduleOwnershipMonitor() { }, 0, ownershipMonitorDelayTimeInSecs, SECONDS); log.info("This leader broker:{} started the ownership monitor.", - lookupServiceAddress); + brokerId); } } @@ -258,13 +270,13 @@ public void cancelOwnershipMonitor() { monitorTask.cancel(false); monitorTask = null; log.info("This previous leader broker:{} stopped the ownership monitor.", - lookupServiceAddress); + brokerId); } } @Override public void cleanOwnerships() { - doCleanup(lookupServiceAddress); + doCleanup(brokerId); } public synchronized void start() throws PulsarServerException { @@ -430,19 +442,8 @@ public CompletableFuture> getChannelOwnerAsync() { new 
IllegalStateException("Invalid channel state:" + channelState.name())); } - return leaderElectionService.readCurrentLeader().thenApply(leader -> { - //expecting http://broker-xyz:port - // TODO: discard this protocol prefix removal - // by a util func that returns lookupServiceAddress(serviceUrl) - if (leader.isPresent()) { - String broker = leader.get().getServiceUrl(); - broker = broker.substring(broker.lastIndexOf('/') + 1); - return Optional.of(broker); - } else { - return Optional.empty(); - } - } - ); + return leaderElectionService.readCurrentLeader() + .thenApply(leader -> leader.map(LeaderBroker::getBrokerId)); } public CompletableFuture isChannelOwnerAsync() { @@ -484,7 +485,35 @@ public boolean isOwner(String serviceUnit, String targetBroker) { } public boolean isOwner(String serviceUnit) { - return isOwner(serviceUnit, lookupServiceAddress); + return isOwner(serviceUnit, brokerId); + } + + private CompletableFuture> getActiveOwnerAsync( + String serviceUnit, + ServiceUnitState state, + Optional owner) { + return deferGetOwnerRequest(serviceUnit) + .thenCompose(newOwner -> { + if (newOwner == null) { + return CompletableFuture.completedFuture(null); + } + + return brokerRegistry.lookupAsync(newOwner) + .thenApply(lookupData -> { + if (lookupData.isPresent()) { + return newOwner; + } else { + throw new IllegalStateException( + "The new owner " + newOwner + " is inactive."); + } + }); + }).whenComplete((__, e) -> { + if (e != null) { + log.error("{} failed to get active owner broker. 
serviceUnit:{}, state:{}, owner:{}", + brokerId, serviceUnit, state, owner, e); + ownerLookUpCounters.get(state).getFailure().incrementAndGet(); + } + }).thenApply(Optional::ofNullable); } public CompletableFuture> getOwnerAsync(String serviceUnit) { @@ -498,18 +527,22 @@ public CompletableFuture> getOwnerAsync(String serviceUnit) { ownerLookUpCounters.get(state).getTotal().incrementAndGet(); switch (state) { case Owned -> { - return CompletableFuture.completedFuture(Optional.of(data.dstBroker())); + return getActiveOwnerAsync(serviceUnit, state, Optional.of(data.dstBroker())); } case Splitting -> { - return CompletableFuture.completedFuture(Optional.of(data.sourceBroker())); + return getActiveOwnerAsync(serviceUnit, state, Optional.of(data.sourceBroker())); } case Assigning, Releasing -> { - return deferGetOwnerRequest(serviceUnit).whenComplete((__, e) -> { - if (e != null) { - ownerLookUpCounters.get(state).getFailure().incrementAndGet(); - } - }).thenApply( - broker -> broker == null ? Optional.empty() : Optional.of(broker)); + if (isTargetBroker(data.dstBroker())) { + return getActiveOwnerAsync(serviceUnit, state, Optional.of(data.dstBroker())); + } + // If this broker is not the dst broker, return the dst broker as the owner(or empty). + // Clients need to connect(redirect) to the dst broker anyway + // and wait for the dst broker to receive `Owned`. + // This is also required to return getOwnerAsync on the src broker immediately during unloading. + // Otherwise, topic creation(getOwnerAsync) could block unloading bundles, + // if the topic creation(getOwnerAsync) happens during unloading on the src broker. 
+ return CompletableFuture.completedFuture(Optional.ofNullable(data.dstBroker())); } case Init, Free -> { return CompletableFuture.completedFuture(Optional.empty()); @@ -527,6 +560,25 @@ public CompletableFuture> getOwnerAsync(String serviceUnit) { } } + private Optional getOwner(String serviceUnit) { + ServiceUnitStateData data = tableview.get(serviceUnit); + ServiceUnitState state = state(data); + switch (state) { + case Owned -> { + return Optional.of(data.dstBroker()); + } + case Splitting -> { + return Optional.of(data.sourceBroker()); + } + case Init, Free -> { + return Optional.empty(); + } + default -> { + return null; + } + } + } + private long getNextVersionId(String serviceUnit) { var data = tableview.get(serviceUnit); return getNextVersionId(data); @@ -621,7 +673,7 @@ private void handle(String serviceUnit, ServiceUnitStateData data) { long totalHandledRequests = getHandlerTotalCounter(data).incrementAndGet(); if (debug()) { log.info("{} received a handle request for serviceUnit:{}, data:{}. totalHandledRequests:{}", - lookupServiceAddress, serviceUnit, data, totalHandledRequests); + brokerId, serviceUnit, data, totalHandledRequests); } ServiceUnitState state = state(data); @@ -680,12 +732,12 @@ private AtomicLong getHandlerCounter(ServiceUnitStateData data, boolean total) { private void log(Throwable e, String serviceUnit, ServiceUnitStateData data, ServiceUnitStateData next) { if (e == null) { - if (log.isDebugEnabled() || isTransferCommand(data)) { + if (debug() || isTransferCommand(data)) { long handlerTotalCount = getHandlerTotalCounter(data).get(); long handlerFailureCount = getHandlerFailureCounter(data).get(); log.info("{} handled {} event for serviceUnit:{}, cur:{}, next:{}, " + "totalHandledRequests:{}, totalFailedRequests:{}", - lookupServiceAddress, getLogEventTag(data), serviceUnit, + brokerId, getLogEventTag(data), serviceUnit, data == null ? "" : data, next == null ? 
"" : next, handlerTotalCount, handlerFailureCount @@ -696,7 +748,7 @@ lookupServiceAddress, getLogEventTag(data), serviceUnit, long handlerFailureCount = getHandlerFailureCounter(data).incrementAndGet(); log.error("{} failed to handle {} event for serviceUnit:{}, cur:{}, next:{}, " + "totalHandledRequests:{}, totalFailedRequests:{}", - lookupServiceAddress, getLogEventTag(data), serviceUnit, + brokerId, getLogEventTag(data), serviceUnit, data == null ? "" : data, next == null ? "" : next, handlerTotalCount, handlerFailureCount, @@ -719,6 +771,9 @@ private void handleSkippedEvent(String serviceUnit) { private void handleOwnEvent(String serviceUnit, ServiceUnitStateData data) { var getOwnerRequest = getOwnerRequests.remove(serviceUnit); if (getOwnerRequest != null) { + if (debug()) { + log.info("Returned owner request for serviceUnit:{}", serviceUnit); + } getOwnerRequest.complete(data.dstBroker()); } stateChangeListeners.notify(serviceUnit, data, null); @@ -770,9 +825,14 @@ private void handleFreeEvent(String serviceUnit, ServiceUnitStateData data) { if (getOwnerRequest != null) { getOwnerRequest.complete(null); } - stateChangeListeners.notify(serviceUnit, data, null); + if (isTargetBroker(data.sourceBroker())) { - log(null, serviceUnit, data, null); + stateChangeListeners.notifyOnCompletion( + data.force() ? 
closeServiceUnit(serviceUnit) + : CompletableFuture.completedFuture(0), serviceUnit, data) + .whenComplete((__, e) -> log(e, serviceUnit, data, null)); + } else { + stateChangeListeners.notify(serviceUnit, data, null); } } @@ -822,24 +882,58 @@ private boolean isTargetBroker(String broker) { if (broker == null) { return false; } - return broker.equals(lookupServiceAddress); + return broker.equals(brokerId); } private CompletableFuture deferGetOwnerRequest(String serviceUnit) { - return getOwnerRequests - .computeIfAbsent(serviceUnit, k -> { - CompletableFuture future = new CompletableFuture<>(); - future.orTimeout(inFlightStateWaitingTimeInMillis, TimeUnit.MILLISECONDS) - .whenComplete((v, e) -> { - if (e != null) { - getOwnerRequests.remove(serviceUnit, future); - log.warn("Failed to getOwner for serviceUnit:{}", - serviceUnit, e); - } - } - ); - return future; + + var requested = new MutableObject>(); + try { + return getOwnerRequests + .computeIfAbsent(serviceUnit, k -> { + var ownerBefore = getOwner(serviceUnit); + if (ownerBefore != null && ownerBefore.isPresent()) { + // Here, we do a quick active check first with the computeIfAbsent lock + brokerRegistry.lookupAsync(ownerBefore.get()).getNow(Optional.empty()) + .ifPresent(__ -> requested.setValue( + CompletableFuture.completedFuture(ownerBefore.get()))); + + if (requested.getValue() != null) { + return requested.getValue(); + } + } + + + CompletableFuture future = + new CompletableFuture().orTimeout(inFlightStateWaitingTimeInMillis, + TimeUnit.MILLISECONDS) + .exceptionally(e -> { + var ownerAfter = getOwner(serviceUnit); + log.warn("{} failed to wait for owner for serviceUnit:{}; Trying to " + + "return the current owner:{}", + brokerId, serviceUnit, ownerAfter, e); + if (ownerAfter == null) { + throw new IllegalStateException(e); + } + return ownerAfter.orElse(null); + }); + if (debug()) { + log.info("{} is waiting for owner for serviceUnit:{}", brokerId, serviceUnit); + } + requested.setValue(future); + 
return future; + }); + } finally { + var future = requested.getValue(); + if (future != null) { + future.whenComplete((__, e) -> { + getOwnerRequests.remove(serviceUnit); + if (e != null) { + log.warn("{} failed to getOwner for serviceUnit:{}", brokerId, serviceUnit, e); + } }); + } + } } private CompletableFuture closeServiceUnit(String serviceUnit) { @@ -1114,24 +1208,34 @@ private void handleBrokerDeletionEvent(String broker) { } private void scheduleCleanup(String broker, long delayInSecs) { - cleanupJobs.computeIfAbsent(broker, k -> { - Executor delayed = CompletableFuture - .delayedExecutor(delayInSecs, TimeUnit.SECONDS, pulsar.getLoadManagerExecutor()); - totalInactiveBrokerCleanupScheduledCnt++; - return CompletableFuture - .runAsync(() -> { - try { - doCleanup(broker); - } catch (Throwable e) { - log.error("Failed to run the cleanup job for the broker {}, " - + "totalCleanupErrorCnt:{}.", - broker, totalCleanupErrorCnt.incrementAndGet(), e); - } finally { - cleanupJobs.remove(broker); + var scheduled = new MutableObject>(); + try { + cleanupJobs.computeIfAbsent(broker, k -> { + Executor delayed = CompletableFuture + .delayedExecutor(delayInSecs, TimeUnit.SECONDS, pulsar.getLoadManagerExecutor()); + totalInactiveBrokerCleanupScheduledCnt++; + var future = CompletableFuture + .runAsync(() -> { + try { + doCleanup(broker); + } catch (Throwable e) { + log.error("Failed to run the cleanup job for the broker {}, " + + "totalCleanupErrorCnt:{}.", + broker, totalCleanupErrorCnt.incrementAndGet(), e); + } } - } - , delayed); - }); + , delayed); + scheduled.setValue(future); + return future; + }); + } finally { + var future = scheduled.getValue(); + if (future != null) { + future.whenComplete((v, ex) -> { + cleanupJobs.remove(broker); + }); + } + } log.info("Scheduled ownership cleanup for broker:{} with delay:{} secs. 
Pending clean jobs:{}.", broker, delayInSecs, cleanupJobs.size()); @@ -1139,38 +1243,43 @@ private void scheduleCleanup(String broker, long delayInSecs) { private ServiceUnitStateData getOverrideInactiveBrokerStateData(ServiceUnitStateData orphanData, - String selectedBroker, + Optional selectedBroker, String inactiveBroker) { + + + if (selectedBroker.isEmpty()) { + return new ServiceUnitStateData(Free, null, inactiveBroker, + true, getNextVersionId(orphanData)); + } + if (orphanData.state() == Splitting) { - return new ServiceUnitStateData(Splitting, orphanData.dstBroker(), selectedBroker, + return new ServiceUnitStateData(Splitting, orphanData.dstBroker(), selectedBroker.get(), Map.copyOf(orphanData.splitServiceUnitToDestBroker()), true, getNextVersionId(orphanData)); } else { - return new ServiceUnitStateData(Owned, selectedBroker, inactiveBroker, + return new ServiceUnitStateData(Owned, selectedBroker.get(), inactiveBroker, true, getNextVersionId(orphanData)); } } private void overrideOwnership(String serviceUnit, ServiceUnitStateData orphanData, String inactiveBroker) { Optional selectedBroker = selectBroker(serviceUnit, inactiveBroker); - if (selectedBroker.isPresent()) { - var override = getOverrideInactiveBrokerStateData( - orphanData, selectedBroker.get(), inactiveBroker); - log.info("Overriding ownership serviceUnit:{} from orphanData:{} to overrideData:{}", - serviceUnit, orphanData, override); - publishOverrideEventAsync(serviceUnit, orphanData, override) - .exceptionally(e -> { - log.error( - "Failed to override the ownership serviceUnit:{} orphanData:{}. " - + "Failed to publish override event. totalCleanupErrorCnt:{}", - serviceUnit, orphanData, totalCleanupErrorCnt.incrementAndGet()); - return null; - }); - } else { - log.error("Failed to override the ownership serviceUnit:{} orphanData:{}. Empty selected broker. " + if (selectedBroker.isEmpty()) { + log.warn("Empty selected broker for ownership serviceUnit:{} orphanData:{}." 
+ "totalCleanupErrorCnt:{}", serviceUnit, orphanData, totalCleanupErrorCnt.incrementAndGet()); } + var override = getOverrideInactiveBrokerStateData(orphanData, selectedBroker, inactiveBroker); + log.info("Overriding ownership serviceUnit:{} from orphanData:{} to overrideData:{}", + serviceUnit, orphanData, override); + publishOverrideEventAsync(serviceUnit, orphanData, override) + .exceptionally(e -> { + log.error( + "Failed to override the ownership serviceUnit:{} orphanData:{}. " + + "Failed to publish override event. totalCleanupErrorCnt:{}", + serviceUnit, orphanData, totalCleanupErrorCnt.incrementAndGet()); + return null; + }); } private void waitForCleanups(String broker, boolean excludeSystemTopics, int maxWaitTimeInMillis) { @@ -1202,7 +1311,7 @@ private void waitForCleanups(String broker, boolean excludeSystemTopics, int max MILLISECONDS.sleep(OWNERSHIP_CLEAN_UP_WAIT_RETRY_DELAY_IN_MILLIS); } catch (InterruptedException e) { log.warn("Interrupted while delaying the next service unit clean-up. 
Cleaning broker:{}", - lookupServiceAddress); + brokerId); } } } @@ -1213,49 +1322,19 @@ private synchronized void doCleanup(String broker) { log.info("Started ownership cleanup for the inactive broker:{}", broker); int orphanServiceUnitCleanupCnt = 0; long totalCleanupErrorCntStart = totalCleanupErrorCnt.get(); - String heartbeatNamespace = - NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()) - .toString(); - String heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), - pulsar.getConfiguration()).toString(); - Map orphanSystemServiceUnits = new HashMap<>(); for (var etr : tableview.entrySet()) { var stateData = etr.getValue(); var serviceUnit = etr.getKey(); var state = state(stateData); - if (StringUtils.equals(broker, stateData.dstBroker())) { - if (isActiveState(state)) { - if (serviceUnit.startsWith(SYSTEM_NAMESPACE.toString())) { - orphanSystemServiceUnits.put(serviceUnit, stateData); - } else if (serviceUnit.startsWith(heartbeatNamespace) - || serviceUnit.startsWith(heartbeatNamespaceV2)) { - // Skip the heartbeat namespace - log.info("Skip override heartbeat namespace bundle" - + " serviceUnit:{}, stateData:{}", serviceUnit, stateData); - tombstoneAsync(serviceUnit).whenComplete((__, e) -> { - if (e != null) { - log.error("Failed cleaning the heartbeat namespace ownership serviceUnit:{}, " - + "stateData:{}, cleanupErrorCnt:{}.", - serviceUnit, stateData, - totalCleanupErrorCnt.incrementAndGet() - totalCleanupErrorCntStart, e); - } - }); - } else { - overrideOwnership(serviceUnit, stateData, broker); - } - orphanServiceUnitCleanupCnt++; - } - - } else if (StringUtils.equals(broker, stateData.sourceBroker())) { - if (isInFlightState(state)) { - if (serviceUnit.startsWith(SYSTEM_NAMESPACE.toString())) { - orphanSystemServiceUnits.put(serviceUnit, stateData); - } else { - overrideOwnership(serviceUnit, stateData, broker); - } - orphanServiceUnitCleanupCnt++; + if 
(StringUtils.equals(broker, stateData.dstBroker()) && isActiveState(state) + || StringUtils.equals(broker, stateData.sourceBroker()) && isInFlightState(state)) { + if (serviceUnit.startsWith(SYSTEM_NAMESPACE.toString())) { + orphanSystemServiceUnits.put(serviceUnit, stateData); + } else { + overrideOwnership(serviceUnit, stateData, broker); } + orphanServiceUnitCleanupCnt++; } } @@ -1302,7 +1381,7 @@ private synchronized void doCleanup(String broker) { broker, cleanupTime, orphanServiceUnitCleanupCnt, - totalCleanupErrorCntStart - totalCleanupErrorCnt.get(), + totalCleanupErrorCnt.get() - totalCleanupErrorCntStart, printCleanupMetrics()); } @@ -1399,16 +1478,21 @@ protected void monitorOwnerships(List brokers) { String srcBroker = stateData.sourceBroker(); var state = stateData.state(); - if (isActiveState(state)) { - if (StringUtils.isNotBlank(srcBroker) && !activeBrokers.contains(srcBroker)) { - inactiveBrokers.add(srcBroker); - } else if (StringUtils.isNotBlank(dstBroker) && !activeBrokers.contains(dstBroker)) { - inactiveBrokers.add(dstBroker); - } else if (isInFlightState(state) - && now - stateData.timestamp() > inFlightStateWaitingTimeInMillis) { - orphanServiceUnits.put(serviceUnit, stateData); - } - } else if (now - stateData.timestamp() > semiTerminalStateWaitingTimeInMillis) { + if (isActiveState(state) && StringUtils.isNotBlank(srcBroker) && !activeBrokers.contains(srcBroker)) { + inactiveBrokers.add(srcBroker); + continue; + } + if (isActiveState(state) && StringUtils.isNotBlank(dstBroker) && !activeBrokers.contains(dstBroker)) { + inactiveBrokers.add(dstBroker); + continue; + } + if (isActiveState(state) && isInFlightState(state) + && now - stateData.timestamp() > inFlightStateWaitingTimeInMillis) { + orphanServiceUnits.put(serviceUnit, stateData); + continue; + } + + if (!isActiveState(state) && now - stateData.timestamp() > semiTerminalStateWaitingTimeInMillis) { log.info("Found semi-terminal states to tombstone" + " serviceUnit:{}, stateData:{}", 
serviceUnit, stateData); tombstoneAsync(serviceUnit).whenComplete((__, e) -> { @@ -1486,7 +1570,7 @@ protected void monitorOwnerships(List brokers) { inactiveBrokers, inactiveBrokers.size(), orphanServiceUnitCleanupCnt, serviceUnitTombstoneCleanupCnt, - totalCleanupErrorCntStart - totalCleanupErrorCnt.get(), + totalCleanupErrorCnt.get() - totalCleanupErrorCntStart, printCleanupMetrics()); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java index 2dde0c4708e41..ffdbbc2af4219 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/manager/UnloadManager.java @@ -88,6 +88,13 @@ public CompletableFuture waitAsync(CompletableFuture eventPubFuture, @Override public void handleEvent(String serviceUnit, ServiceUnitStateData data, Throwable t) { + if (t != null && inFlightUnloadRequest.containsKey(serviceUnit)) { + if (log.isDebugEnabled()) { + log.debug("Handling {} for service unit {} with exception.", data, serviceUnit, t); + } + this.complete(serviceUnit, t); + return; + } ServiceUnitState state = ServiceUnitStateData.state(data); switch (state) { case Free, Owned -> this.complete(serviceUnit, t); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java index 2f5c32197c1fd..624546fdff837 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/models/TopKBundles.java @@ -30,6 +30,8 @@ import org.apache.pulsar.broker.loadbalance.extensions.data.TopBundlesLoadData; import 
org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceAllocationPolicies; +import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; @@ -70,7 +72,8 @@ public void update(Map bundleStats, int topk) { pulsar.getConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled(); for (var etr : bundleStats.entrySet()) { String bundle = etr.getKey(); - if (bundle.startsWith(NamespaceName.SYSTEM_NAMESPACE.toString())) { + // TODO: do not filter system topic while shedding + if (NamespaceService.isSystemServiceNamespace(NamespaceBundle.getBundleNamespace(bundle))) { continue; } if (!isLoadBalancerSheddingBundlesWithPoliciesEnabled && hasPolicies(bundle)) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java index 468552db541ec..56238d6528e60 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/policies/IsolationPoliciesHelper.java @@ -42,14 +42,14 @@ public CompletableFuture> applyIsolationPoliciesAsync(Map, private final BrokerHostUsage brokerHostUsage; - private final String lookupServiceAddress; + private final String brokerId; @Getter private final BrokerLoadData localData; @@ -67,10 +67,10 @@ public class BrokerLoadDataReporter implements LoadDataReporter, private long tombstoneDelayInMillis; public BrokerLoadDataReporter(PulsarService pulsar, - String lookupServiceAddress, + String brokerId, LoadDataStore 
brokerLoadDataStore) { this.brokerLoadDataStore = brokerLoadDataStore; - this.lookupServiceAddress = lookupServiceAddress; + this.brokerId = brokerId; this.pulsar = pulsar; this.conf = this.pulsar.getConfiguration(); if (SystemUtils.IS_OS_LINUX) { @@ -111,7 +111,7 @@ public CompletableFuture reportAsync(boolean force) { log.info("publishing load report:{}", localData.toString(conf)); } CompletableFuture future = - this.brokerLoadDataStore.pushAsync(this.lookupServiceAddress, newLoadData); + this.brokerLoadDataStore.pushAsync(this.brokerId, newLoadData); future.whenComplete((__, ex) -> { if (ex == null) { localData.setReportedAt(System.currentTimeMillis()); @@ -185,7 +185,7 @@ protected void tombstone() { } var lastSuccessfulTombstonedAt = lastTombstonedAt; lastTombstonedAt = now; // dedup first - brokerLoadDataStore.removeAsync(lookupServiceAddress) + brokerLoadDataStore.removeAsync(brokerId) .whenComplete((__, e) -> { if (e != null) { log.error("Failed to clean broker load data.", e); @@ -209,13 +209,13 @@ public void handleEvent(String serviceUnit, ServiceUnitStateData data, Throwable ServiceUnitState state = ServiceUnitStateData.state(data); switch (state) { case Releasing, Splitting -> { - if (StringUtils.equals(data.sourceBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.sourceBroker(), brokerId)) { localData.clear(); tombstone(); } } case Owned -> { - if (StringUtils.equals(data.dstBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.dstBroker(), brokerId)) { localData.clear(); tombstone(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java index 0fa37d3687c20..43e05ad1ac972 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/reporter/TopBundleLoadDataReporter.java @@ -41,7 +41,7 @@ public class TopBundleLoadDataReporter implements LoadDataReporter bundleLoadDataStore; @@ -53,10 +53,10 @@ public class TopBundleLoadDataReporter implements LoadDataReporter bundleLoadDataStore) { this.pulsar = pulsar; - this.lookupServiceAddress = lookupServiceAddress; + this.brokerId = brokerId; this.bundleLoadDataStore = bundleLoadDataStore; this.lastBundleStatsUpdatedAt = 0; this.topKBundles = new TopKBundles(pulsar); @@ -88,7 +88,7 @@ public CompletableFuture reportAsync(boolean force) { if (ExtensibleLoadManagerImpl.debug(pulsar.getConfiguration(), log)) { log.info("Reporting TopBundlesLoadData:{}", topKBundles.getLoadData()); } - return this.bundleLoadDataStore.pushAsync(lookupServiceAddress, topKBundles.getLoadData()) + return this.bundleLoadDataStore.pushAsync(brokerId, topKBundles.getLoadData()) .exceptionally(e -> { log.error("Failed to report top-bundles load data.", e); return null; @@ -106,7 +106,7 @@ protected void tombstone() { } var lastSuccessfulTombstonedAt = lastTombstonedAt; lastTombstonedAt = now; // dedup first - bundleLoadDataStore.removeAsync(lookupServiceAddress) + bundleLoadDataStore.removeAsync(brokerId) .whenComplete((__, e) -> { if (e != null) { log.error("Failed to clean broker load data.", e); @@ -129,12 +129,12 @@ public void handleEvent(String serviceUnit, ServiceUnitStateData data, Throwable ServiceUnitState state = ServiceUnitStateData.state(data); switch (state) { case Releasing, Splitting -> { - if (StringUtils.equals(data.sourceBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.sourceBroker(), brokerId)) { tombstone(); } } case Owned -> { - if (StringUtils.equals(data.dstBroker(), lookupServiceAddress)) { + if (StringUtils.equals(data.dstBroker(), brokerId)) { tombstone(); } } diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java index d6c754c90fcf6..218f57932a56b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/UnloadScheduler.java @@ -219,9 +219,8 @@ private static NamespaceUnloadStrategy createNamespaceUnloadStrategy(PulsarServi Thread.currentThread().getContextClassLoader()); log.info("Created namespace unload strategy:{}", unloadStrategy.getClass().getCanonicalName()); } catch (Exception e) { - log.error("Error when trying to create namespace unload strategy: {}", - conf.getLoadBalancerLoadPlacementStrategy(), e); - log.error("create namespace unload strategy failed. using TransferShedder instead."); + log.error("Error when trying to create namespace unload strategy: {}. Using {} instead.", + conf.getLoadBalancerLoadSheddingStrategy(), TransferShedder.class.getCanonicalName(), e); unloadStrategy = new TransferShedder(); } unloadStrategy.initialize(pulsar); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java index 680a36523a214..a7deeeaad8a5c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStore.java @@ -81,9 +81,26 @@ public interface LoadDataStore extends Closeable { */ void closeTableView() throws IOException; + + /** + * Starts the data store (both producer and table view). 
+ */ + void start() throws LoadDataStoreException; + + /** + * Inits the data store (close and start the data store). + */ + void init() throws IOException; + /** * Starts the table view. */ void startTableView() throws LoadDataStoreException; + + /** + * Starts the producer. + */ + void startProducer() throws LoadDataStoreException; + } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java index 18f39abd76b76..bcb2657c67f05 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreFactory.java @@ -18,15 +18,16 @@ */ package org.apache.pulsar.broker.loadbalance.extensions.store; -import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.broker.PulsarService; /** * The load data store factory, use to create the load data store. 
*/ public class LoadDataStoreFactory { - public static LoadDataStore create(PulsarClient client, String name, Class clazz) + public static LoadDataStore create(PulsarService pulsar, String name, + Class clazz) throws LoadDataStoreException { - return new TableViewLoadDataStoreImpl<>(client, name, clazz); + return new TableViewLoadDataStoreImpl<>(pulsar, name, clazz); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java index a400163ebf122..d916e91716223 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/extensions/store/TableViewLoadDataStoreImpl.java @@ -23,7 +23,12 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; @@ -35,11 +40,17 @@ * * @param Load data type. 
*/ +@Slf4j public class TableViewLoadDataStoreImpl implements LoadDataStore { - private TableView tableView; + private static final long LOAD_DATA_REPORT_UPDATE_MAX_INTERVAL_MULTIPLIER_BEFORE_RESTART = 2; - private final Producer producer; + private volatile TableView tableView; + private volatile long tableViewLastUpdateTimestamp; + + private volatile Producer producer; + + private final ServiceConfiguration conf; private final PulsarClient client; @@ -47,10 +58,11 @@ public class TableViewLoadDataStoreImpl implements LoadDataStore { private final Class clazz; - public TableViewLoadDataStoreImpl(PulsarClient client, String topic, Class clazz) throws LoadDataStoreException { + public TableViewLoadDataStoreImpl(PulsarService pulsar, String topic, Class clazz) + throws LoadDataStoreException { try { - this.client = client; - this.producer = client.newProducer(Schema.JSON(clazz)).topic(topic).create(); + this.conf = pulsar.getConfiguration(); + this.client = pulsar.getClient(); this.topic = topic; this.clazz = clazz; } catch (Exception e) { @@ -59,40 +71,42 @@ public TableViewLoadDataStoreImpl(PulsarClient client, String topic, Class cl } @Override - public CompletableFuture pushAsync(String key, T loadData) { + public synchronized CompletableFuture pushAsync(String key, T loadData) { + validateProducer(); return producer.newMessage().key(key).value(loadData).sendAsync().thenAccept(__ -> {}); } @Override - public CompletableFuture removeAsync(String key) { + public synchronized CompletableFuture removeAsync(String key) { + validateProducer(); return producer.newMessage().key(key).value(null).sendAsync().thenAccept(__ -> {}); } @Override - public Optional get(String key) { - validateTableViewStart(); + public synchronized Optional get(String key) { + validateTableView(); return Optional.ofNullable(tableView.get(key)); } @Override - public void forEach(BiConsumer action) { - validateTableViewStart(); + public synchronized void forEach(BiConsumer action) { + 
validateTableView(); tableView.forEach(action); } - public Set> entrySet() { - validateTableViewStart(); + public synchronized Set> entrySet() { + validateTableView(); return tableView.entrySet(); } @Override - public int size() { - validateTableViewStart(); + public synchronized int size() { + validateTableView(); return tableView.size(); } @Override - public void closeTableView() throws IOException { + public synchronized void closeTableView() throws IOException { if (tableView != null) { tableView.close(); tableView = null; @@ -100,10 +114,18 @@ public void closeTableView() throws IOException { } @Override - public void startTableView() throws LoadDataStoreException { + public synchronized void start() throws LoadDataStoreException { + startProducer(); + startTableView(); + } + + @Override + public synchronized void startTableView() throws LoadDataStoreException { if (tableView == null) { try { tableView = client.newTableViewBuilder(Schema.JSON(clazz)).topic(topic).create(); + tableView.forEachAndListen((k, v) -> + tableViewLastUpdateTimestamp = System.currentTimeMillis()); } catch (PulsarClientException e) { tableView = null; throw new LoadDataStoreException(e); @@ -112,17 +134,74 @@ public void startTableView() throws LoadDataStoreException { } @Override - public void close() throws IOException { + public synchronized void startProducer() throws LoadDataStoreException { + if (producer == null) { + try { + producer = client.newProducer(Schema.JSON(clazz)).topic(topic).create(); + } catch (PulsarClientException e) { + producer = null; + throw new LoadDataStoreException(e); + } + } + } + + @Override + public synchronized void close() throws IOException { if (producer != null) { producer.close(); + producer = null; } closeTableView(); } - private void validateTableViewStart() { - if (tableView == null) { - throw new IllegalStateException("table view has not been started"); + @Override + public synchronized void init() throws IOException { + close(); + start(); + } 
+ + private void validateProducer() { + if (producer == null || !producer.isConnected()) { + try { + if (producer != null) { + producer.close(); + } + producer = null; + startProducer(); + log.info("Restarted producer on {}", topic); + } catch (Exception e) { + log.error("Failed to restart producer on {}", topic, e); + throw new RuntimeException(e); + } } } + private void validateTableView() { + String restartReason = null; + + if (tableView == null) { + restartReason = "table view is null"; + } else { + long inactiveDuration = System.currentTimeMillis() - tableViewLastUpdateTimestamp; + long threshold = TimeUnit.MINUTES.toMillis(conf.getLoadBalancerReportUpdateMaxIntervalMinutes()) + * LOAD_DATA_REPORT_UPDATE_MAX_INTERVAL_MULTIPLIER_BEFORE_RESTART; + if (inactiveDuration > threshold) { + restartReason = String.format("inactiveDuration=%d secs > threshold = %d secs", + TimeUnit.MILLISECONDS.toSeconds(inactiveDuration), + TimeUnit.MILLISECONDS.toSeconds(threshold)); + } + } + + if (StringUtils.isNotBlank(restartReason)) { + tableViewLastUpdateTimestamp = 0; + try { + closeTableView(); + startTableView(); + log.info("Restarted tableview on {}, {}", topic, restartReason); + } catch (Exception e) { + log.error("Failed to restart tableview on {}", topic, e); + throw new RuntimeException(e); + } + } + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java index 2f7ca614943b1..6d0e6bb907346 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java @@ -155,7 +155,8 @@ private double getTotalCpuUsageForCGroup(double elapsedTimeSeconds) { * * * Line is split in "words", filtering the first. 
The sum of all numbers give the amount of cpu cycles used this - * far. Real CPU usage should equal the sum subtracting the idle cycles, this would include iowait, irq and steal. + * far. Real CPU usage should equal the sum substracting the idle cycles(that is idle+iowait), this would include + * cpu, user, nice, system, irq, softirq, steal, guest and guest_nice. */ private double getTotalCpuUsageForEntireHost() { LinuxInfoUtils.ResourceUsage cpuUsageForEntireHost = getCpuUsageForEntireHost(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java index 5f2e4b1f25d8b..3d627db6cfa9e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java @@ -21,8 +21,6 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.pulsar.common.stats.JvmMetrics.getJvmDirectMemoryUsed; import io.netty.util.concurrent.FastThreadLocal; -import java.net.MalformedURLException; -import java.net.URL; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -106,59 +104,56 @@ public static void applyNamespacePolicies(final ServiceUnitId serviceUnit, if (isIsolationPoliciesPresent) { LOG.debug("Isolation Policies Present for namespace - [{}]", namespace.toString()); } - for (final String broker : availableBrokers) { - final String brokerUrlString = String.format("http://%s", broker); - URL brokerUrl; + for (final String brokerId : availableBrokers) { + String brokerHost; try { - brokerUrl = new URL(brokerUrlString); - } catch (MalformedURLException e) { - LOG.error("Unable to parse brokerUrl from ResourceUnitId", e); + brokerHost = parseBrokerHost(brokerId); + } catch (IllegalArgumentException e) { + LOG.error("Unable to parse host from {}", 
brokerId, e); continue; } // todo: in future check if the resource unit has resources to take the namespace if (isIsolationPoliciesPresent) { // note: serviceUnitID is namespace name and ResourceID is brokerName - if (policies.isPrimaryBroker(namespace, brokerUrl.getHost())) { - primariesCache.add(broker); + if (policies.isPrimaryBroker(namespace, brokerHost)) { + primariesCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug("Added Primary Broker - [{}] as possible Candidates for" - + " namespace - [{}] with policies", brokerUrl.getHost(), namespace.toString()); + + " namespace - [{}] with policies", brokerHost, namespace.toString()); } - } else if (policies.isSecondaryBroker(namespace, brokerUrl.getHost())) { - secondaryCache.add(broker); + } else if (policies.isSecondaryBroker(namespace, brokerHost)) { + secondaryCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug( "Added Shared Broker - [{}] as possible " + "Candidates for namespace - [{}] with policies", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Skipping Broker - [{}] not primary broker and not shared" + " for namespace - [{}] ", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } } else { // non-persistent topic can be assigned to only those brokers that enabled for non-persistent topic - if (isNonPersistentTopic - && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerUrlString)) { + if (isNonPersistentTopic && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerId)) { if (LOG.isDebugEnabled()) { LOG.debug("Filter broker- [{}] because it doesn't support non-persistent namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } - } else if (!isNonPersistentTopic - && !brokerTopicLoadingPredicate.isEnablePersistentTopics(brokerUrlString)) { + } else if (!isNonPersistentTopic && 
!brokerTopicLoadingPredicate.isEnablePersistentTopics(brokerId)) { // persistent topic can be assigned to only brokers that enabled for persistent-topic if (LOG.isDebugEnabled()) { LOG.debug("Filter broker- [{}] because broker only supports non-persistent namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } - } else if (policies.isSharedBroker(brokerUrl.getHost())) { - secondaryCache.add(broker); + } else if (policies.isSharedBroker(brokerHost)) { + secondaryCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug("Added Shared Broker - [{}] as possible Candidates for namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } } @@ -181,6 +176,16 @@ public static void applyNamespacePolicies(final ServiceUnitId serviceUnit, } } + private static String parseBrokerHost(String brokerId) { + // use last index to support ipv6 addresses + int lastIdx = brokerId.lastIndexOf(':'); + if (lastIdx > -1) { + return brokerId.substring(0, lastIdx); + } else { + throw new IllegalArgumentException("Invalid brokerId: " + brokerId); + } + } + public static CompletableFuture> applyNamespacePoliciesAsync( final ServiceUnitId serviceUnit, final SimpleResourceAllocationPolicies policies, final Set availableBrokers, final BrokerTopicLoadingPredicate brokerTopicLoadingPredicate) { @@ -199,59 +204,57 @@ public static CompletableFuture> applyNamespacePoliciesAsync( LOG.debug("Isolation Policies Present for namespace - [{}]", namespace.toString()); } } - for (final String broker : availableBrokers) { - final String brokerUrlString = String.format("http://%s", broker); - URL brokerUrl; + for (final String brokerId : availableBrokers) { + String brokerHost; try { - brokerUrl = new URL(brokerUrlString); - } catch (MalformedURLException e) { - LOG.error("Unable to parse brokerUrl from ResourceUnitId", e); + brokerHost = parseBrokerHost(brokerId); + } catch (IllegalArgumentException e) { + 
LOG.error("Unable to parse host from {}", brokerId, e); continue; } // todo: in future check if the resource unit has resources to take the namespace if (isIsolationPoliciesPresent) { // note: serviceUnitID is namespace name and ResourceID is brokerName - if (policies.isPrimaryBroker(namespace, brokerUrl.getHost())) { - primariesCache.add(broker); + if (policies.isPrimaryBroker(namespace, brokerHost)) { + primariesCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug("Added Primary Broker - [{}] as possible Candidates for" - + " namespace - [{}] with policies", brokerUrl.getHost(), namespace.toString()); + + " namespace - [{}] with policies", brokerHost, namespace.toString()); } - } else if (policies.isSecondaryBroker(namespace, brokerUrl.getHost())) { - secondaryCache.add(broker); + } else if (policies.isSecondaryBroker(namespace, brokerHost)) { + secondaryCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug( "Added Shared Broker - [{}] as possible " + "Candidates for namespace - [{}] with policies", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Skipping Broker - [{}] not primary broker and not shared" - + " for namespace - [{}] ", brokerUrl.getHost(), namespace.toString()); + + " for namespace - [{}] ", brokerHost, namespace.toString()); } } } else { // non-persistent topic can be assigned to only those brokers that enabled for non-persistent topic - if (isNonPersistentTopic - && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerUrlString)) { + if (isNonPersistentTopic && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerId)) { if (LOG.isDebugEnabled()) { LOG.debug("Filter broker- [{}] because it doesn't support non-persistent namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerId, namespace.toString()); } - } else if (!isNonPersistentTopic - && !brokerTopicLoadingPredicate.isEnablePersistentTopics(brokerUrlString)) { + 
} else if (!isNonPersistentTopic && !brokerTopicLoadingPredicate + .isEnablePersistentTopics(brokerId)) { // persistent topic can be assigned to only brokers that enabled for persistent-topic if (LOG.isDebugEnabled()) { LOG.debug("Filter broker- [{}] because broker only supports non-persistent " - + "namespace - [{}]", brokerUrl.getHost(), namespace.toString()); + + "namespace - [{}]", brokerId, namespace.toString()); } - } else if (policies.isSharedBroker(brokerUrl.getHost())) { - secondaryCache.add(broker); + } else if (policies.isSharedBroker(brokerHost)) { + secondaryCache.add(brokerId); if (LOG.isDebugEnabled()) { LOG.debug("Added Shared Broker - [{}] as possible Candidates for namespace - [{}]", - brokerUrl.getHost(), namespace.toString()); + brokerHost, namespace.toString()); } } } @@ -762,9 +765,9 @@ public static boolean shouldAntiAffinityNamespaceUnload( } public interface BrokerTopicLoadingPredicate { - boolean isEnablePersistentTopics(String brokerUrl); + boolean isEnablePersistentTopics(String brokerId); - boolean isEnableNonPersistentTopics(String brokerUrl); + boolean isEnableNonPersistentTopics(String brokerId); } /** diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java index 8882c2cc6c56b..320c273a2d9b7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java @@ -108,7 +108,7 @@ public class ModularLoadManagerImpl implements ModularLoadManager { public static final int NUM_SHORT_SAMPLES = 10; // Path to ZNode whose children contain ResourceQuota jsons. 
- public static final String RESOURCE_QUOTA_ZPATH = "/loadbalance/resource-quota/namespace"; + public static final String RESOURCE_QUOTA_ZPATH = "/loadbalance/resource-quota"; // Path to ZNode containing TimeAverageBrokerData jsons for each broker. public static final String TIME_AVERAGE_BROKER_ZPATH = "/loadbalance/broker-time-average"; @@ -219,15 +219,15 @@ public ModularLoadManagerImpl() { this.bundleBrokerAffinityMap = new ConcurrentHashMap<>(); this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override - public boolean isEnablePersistentTopics(String brokerUrl) { - final BrokerData brokerData = loadData.getBrokerData().get(brokerUrl.replace("http://", "")); + public boolean isEnablePersistentTopics(String brokerId) { + final BrokerData brokerData = loadData.getBrokerData().get(brokerId); return brokerData != null && brokerData.getLocalData() != null && brokerData.getLocalData().isPersistentTopicsEnabled(); } @Override - public boolean isEnableNonPersistentTopics(String brokerUrl) { - final BrokerData brokerData = loadData.getBrokerData().get(brokerUrl.replace("http://", "")); + public boolean isEnableNonPersistentTopics(String brokerId) { + final BrokerData brokerData = loadData.getBrokerData().get(brokerId); return brokerData != null && brokerData.getLocalData() != null && brokerData.getLocalData().isNonPersistentTopicsEnabled(); } @@ -559,17 +559,6 @@ private void updateBundleData() { bundleData.put(bundle, currentBundleData); } } - - //Remove not active bundle from loadData - for (String bundle : bundleData.keySet()) { - if (!activeBundles.contains(bundle)){ - bundleData.remove(bundle); - if (pulsar.getLeaderElectionService().isLeader()){ - deleteBundleDataFromMetadataStore(bundle); - } - } - } - // Remove all loaded bundles from the preallocated maps. 
final Map preallocatedBundleData = brokerData.getPreallocatedBundleData(); Set ownedNsBundles = pulsar.getNamespaceService().getOwnedServiceUnits() @@ -604,6 +593,16 @@ private void updateBundleData() { LoadManagerShared.fillNamespaceToBundlesMap(preallocatedBundleData.keySet(), namespaceToBundleRange); } } + + // Remove not active bundle from loadData + for (String bundle : bundleData.keySet()) { + if (!activeBundles.contains(bundle)){ + bundleData.remove(bundle); + if (pulsar.getLeaderElectionService().isLeader()){ + deleteBundleDataFromMetadataStore(bundle); + } + } + } } /** @@ -979,14 +978,14 @@ public void start() throws PulsarServerException { // At this point, the ports will be updated with the real port number that the server was assigned Map protocolData = pulsar.getProtocolDataToAdvertise(); - lastData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + lastData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners()); lastData.setProtocols(protocolData); // configure broker-topic mode lastData.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics()); lastData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics()); - localData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + localData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners()); localData.setProtocols(protocolData); localData.setBrokerVersionString(pulsar.getBrokerVersion()); @@ -995,9 +994,9 @@ public void start() throws PulsarServerException { localData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics()); 
localData.setLoadManagerClassName(conf.getLoadManagerClassName()); - String lookupServiceAddress = pulsar.getLookupServiceAddress(); - brokerZnodePath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; - final String timeAverageZPath = TIME_AVERAGE_BROKER_ZPATH + "/" + lookupServiceAddress; + String brokerId = pulsar.getBrokerId(); + brokerZnodePath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + brokerId; + final String timeAverageZPath = TIME_AVERAGE_BROKER_ZPATH + "/" + brokerId; updateLocalBrokerData(); brokerDataLock = brokersData.acquireLock(brokerZnodePath, localData).join(); @@ -1231,7 +1230,6 @@ public String setNamespaceBundleAffinity(String bundle, String broker) { if (StringUtils.isBlank(broker)) { return this.bundleBrokerAffinityMap.remove(bundle); } - broker = broker.replaceFirst("http[s]?://", ""); return this.bundleBrokerAffinityMap.put(bundle, broker); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java index c61d39cf3159a..c8d81bda1bc13 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java @@ -19,7 +19,6 @@ package org.apache.pulsar.broker.loadbalance.impl; import java.util.List; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -32,7 +31,6 @@ import org.apache.pulsar.common.naming.ServiceUnitId; import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.policies.data.loadbalancer.LoadManagerReport; -import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; /** * Wrapper class allowing classes of instance ModularLoadManager to be compatible with the interface LoadManager. 
@@ -75,20 +73,6 @@ public Optional getLeastLoaded(final ServiceUnitId serviceUnit) { return leastLoadedBroker.map(this::buildBrokerResourceUnit); } - private String getBrokerWebServiceUrl(String broker) { - LocalBrokerData localData = (loadManager).getBrokerLocalData(broker); - if (localData != null) { - return localData.getWebServiceUrl() != null ? localData.getWebServiceUrl() - : localData.getWebServiceUrlTls(); - } - return String.format("http://%s", broker); - } - - private String getBrokerZnodeName(String broker, String webServiceUrl) { - String scheme = webServiceUrl.substring(0, webServiceUrl.indexOf("://")); - return String.format("%s://%s", scheme, broker); - } - @Override public List getLoadBalancingMetrics() { return loadManager.getLoadBalancingMetrics(); @@ -149,10 +133,7 @@ public CompletableFuture> getAvailableBrokersAsync() { } private SimpleResourceUnit buildBrokerResourceUnit (String broker) { - String webServiceUrl = getBrokerWebServiceUrl(broker); - String brokerZnodeName = getBrokerZnodeName(broker, webServiceUrl); - return new SimpleResourceUnit(webServiceUrl, - new PulsarResourceDescription(), Map.of(ResourceUnit.PROPERTY_KEY_BROKER_ZNODE_NAME, brokerZnodeName)); + return new SimpleResourceUnit(broker, new PulsarResourceDescription()); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java index 5e99456971147..8bb27e52298be 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java @@ -211,15 +211,15 @@ public SimpleLoadManagerImpl() { .build(); this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override - public boolean isEnablePersistentTopics(String brokerUrl) { - ResourceUnit ru = new 
SimpleResourceUnit(brokerUrl, new PulsarResourceDescription()); + public boolean isEnablePersistentTopics(String brokerId) { + ResourceUnit ru = new SimpleResourceUnit(brokerId, new PulsarResourceDescription()); LoadReport loadReport = currentLoadReports.get(ru); return loadReport != null && loadReport.isPersistentTopicsEnabled(); } @Override - public boolean isEnableNonPersistentTopics(String brokerUrl) { - ResourceUnit ru = new SimpleResourceUnit(brokerUrl, new PulsarResourceDescription()); + public boolean isEnableNonPersistentTopics(String brokerId) { + ResourceUnit ru = new SimpleResourceUnit(brokerId, new PulsarResourceDescription()); LoadReport loadReport = currentLoadReports.get(ru); return loadReport != null && loadReport.isNonPersistentTopicsEnabled(); } @@ -234,7 +234,7 @@ public void initialize(final PulsarService pulsar) { brokerHostUsage = new GenericBrokerHostUsageImpl(pulsar); } this.policies = new SimpleResourceAllocationPolicies(pulsar); - lastLoadReport = new LoadReport(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + lastLoadReport = new LoadReport(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls()); lastLoadReport.setProtocols(pulsar.getProtocolDataToAdvertise()); lastLoadReport.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics()); @@ -266,8 +266,8 @@ public SimpleLoadManagerImpl(PulsarService pulsar) { @Override public void start() throws PulsarServerException { // Register the brokers in metadata store - String lookupServiceAddress = pulsar.getLookupServiceAddress(); - String brokerLockPath = LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; + String brokerId = pulsar.getBrokerId(); + String brokerLockPath = LOADBALANCE_BROKERS_ROOT + "/" + brokerId; try { LoadReport loadReport = null; @@ -653,7 +653,6 @@ public void writeResourceQuotasToZooKeeper() throws Exception { */ private synchronized void doLoadRanking() { 
ResourceUnitRanking.setCpuUsageByMsgRate(this.realtimeCpuLoadFactor); - String hostname = pulsar.getAdvertisedAddress(); String strategy = this.getLoadBalancerPlacementStrategy(); log.info("doLoadRanking - load balancing strategy: {}", strategy); if (!currentLoadReports.isEmpty()) { @@ -702,8 +701,8 @@ private synchronized void doLoadRanking() { } // update metrics - if (resourceUnit.getResourceId().contains(hostname)) { - updateLoadBalancingMetrics(hostname, finalRank, ranking); + if (resourceUnit.getResourceId().equals(pulsar.getBrokerId())) { + updateLoadBalancingMetrics(pulsar.getAdvertisedAddress(), finalRank, ranking); } } updateBrokerToNamespaceToBundle(); @@ -711,7 +710,7 @@ private synchronized void doLoadRanking() { this.resourceUnitRankings = newResourceUnitRankings; } else { log.info("Leader broker[{}] No ResourceUnits to rank this run, Using Old Ranking", - pulsar.getSafeWebServiceAddress()); + pulsar.getBrokerId()); } } @@ -855,7 +854,7 @@ private synchronized ResourceUnit findBrokerForPlacement(Multimap ConcurrentOpenHashMap.>newBuilder() .build()) @@ -876,7 +875,7 @@ private Multimap getFinalCandidates(ServiceUnitId serviceUni availableBrokersCache.clear(); for (final Set resourceUnits : availableBrokers.values()) { for (final ResourceUnit resourceUnit : resourceUnits) { - availableBrokersCache.add(resourceUnit.getResourceId().replace("http://", "")); + availableBrokersCache.add(resourceUnit.getResourceId()); } } brokerCandidateCache.clear(); @@ -899,7 +898,7 @@ private Multimap getFinalCandidates(ServiceUnitId serviceUni final Long rank = entry.getKey(); final Set resourceUnits = entry.getValue(); for (final ResourceUnit resourceUnit : resourceUnits) { - if (brokerCandidateCache.contains(resourceUnit.getResourceId().replace("http://", ""))) { + if (brokerCandidateCache.contains(resourceUnit.getResourceId())) { result.put(rank, resourceUnit); } } @@ -928,8 +927,7 @@ private Map> getAvailableBrokers(ServiceUnitId serviceUn availableBrokers = new 
HashMap<>(); for (String broker : activeBrokers) { - ResourceUnit resourceUnit = new SimpleResourceUnit(String.format("http://%s", broker), - new PulsarResourceDescription()); + ResourceUnit resourceUnit = new SimpleResourceUnit(broker, new PulsarResourceDescription()); availableBrokers.computeIfAbsent(0L, key -> new TreeSet<>()).add(resourceUnit); } log.info("Choosing at random from broker list: [{}]", availableBrokers.values()); @@ -956,7 +954,7 @@ private synchronized ResourceUnit getLeastLoadedBroker(ServiceUnitId serviceUnit Iterator> candidateIterator = finalCandidates.entries().iterator(); while (candidateIterator.hasNext()) { Map.Entry candidate = candidateIterator.next(); - String candidateBrokerName = candidate.getValue().getResourceId().replace("http://", ""); + String candidateBrokerName = candidate.getValue().getResourceId(); if (!activeBrokers.contains(candidateBrokerName)) { candidateIterator.remove(); // Current candidate points to an inactive broker, so remove it } @@ -1005,8 +1003,7 @@ private void updateRanking() { try { String key = String.format("%s/%s", LOADBALANCE_BROKERS_ROOT, broker); LoadReport lr = loadReports.readLock(key).join().get(); - ResourceUnit ru = new SimpleResourceUnit(String.format("http://%s", lr.getName()), - fromLoadReport(lr)); + ResourceUnit ru = new SimpleResourceUnit(lr.getName(), fromLoadReport(lr)); this.currentLoadReports.put(ru, lr); } catch (Exception e) { log.warn("Error reading load report from Cache for broker - [{}], [{}]", broker, e); @@ -1072,13 +1069,13 @@ public LoadReport generateLoadReport() throws Exception { private LoadReport generateLoadReportForcefully() throws Exception { synchronized (bundleGainsCache) { try { - LoadReport loadReport = new LoadReport(pulsar.getSafeWebServiceAddress(), + LoadReport loadReport = new LoadReport(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls()); 
loadReport.setProtocols(pulsar.getProtocolDataToAdvertise()); loadReport.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics()); loadReport.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics()); - loadReport.setName(pulsar.getLookupServiceAddress()); + loadReport.setName(pulsar.getBrokerId()); loadReport.setBrokerVersionString(pulsar.getBrokerVersion()); SystemResourceUsage systemResourceUsage = this.getSystemResourceUsage(); @@ -1121,8 +1118,8 @@ private LoadReport generateLoadReportForcefully() throws Exception { loadReport.setAllocatedMsgRateIn(allocatedQuota.getMsgRateIn()); loadReport.setAllocatedMsgRateOut(allocatedQuota.getMsgRateOut()); - final ResourceUnit resourceUnit = new SimpleResourceUnit( - String.format("http://%s", loadReport.getName()), fromLoadReport(loadReport)); + final ResourceUnit resourceUnit = + new SimpleResourceUnit(loadReport.getName(), fromLoadReport(loadReport)); Set preAllocatedBundles; if (resourceUnitRankings.containsKey(resourceUnit)) { preAllocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles(); @@ -1277,7 +1274,7 @@ private synchronized void updateBrokerToNamespaceToBundle() { final Set preallocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles(); final ConcurrentOpenHashMap> namespaceToBundleRange = brokerToNamespaceToBundleRange - .computeIfAbsent(broker.replace("http://", ""), + .computeIfAbsent(broker, k -> ConcurrentOpenHashMap.>newBuilder() .build()); @@ -1455,7 +1452,6 @@ public String setNamespaceBundleAffinity(String bundle, String broker) { if (StringUtils.isBlank(broker)) { return this.bundleBrokerAffinityMap.remove(bundle); } - broker = broker.replaceFirst("http[s]?://", ""); return this.bundleBrokerAffinityMap.put(bundle, broker); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java index b92af5b7c69f3..d8dcfa007cfc5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedder.java @@ -25,7 +25,7 @@ import org.apache.commons.lang3.mutable.MutableDouble; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.commons.lang3.mutable.MutableObject; -import org.apache.commons.lang3.tuple.Triple; +import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.loadbalance.LoadData; import org.apache.pulsar.broker.loadbalance.LoadSheddingStrategy; @@ -36,7 +36,7 @@ /** * This strategy tends to distribute load uniformly across all brokers. This strategy checks load difference between - * broker with highest load and broker with lowest load. If the difference is higher than configured thresholds + * broker with the highest load and broker with the lowest load. If the difference is higher than configured thresholds * {@link ServiceConfiguration#getLoadBalancerMsgRateDifferenceShedderThreshold()} or * {@link ServiceConfiguration#getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold()} then it finds out * bundles which can be unloaded to distribute traffic evenly across all brokers. 
@@ -63,25 +63,37 @@ public Multimap findBundlesForUnloading(final LoadData loadData, Map loadBundleData = loadData.getBundleDataForLoadShedding(); Map recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles(); - MutableObject overloadedBroker = new MutableObject<>(); - MutableObject underloadedBroker = new MutableObject<>(); + MutableObject msgRateOverloadedBroker = new MutableObject<>(); + MutableObject msgThroughputOverloadedBroker = new MutableObject<>(); + MutableObject msgRateUnderloadedBroker = new MutableObject<>(); + MutableObject msgThroughputUnderloadedBroker = new MutableObject<>(); MutableDouble maxMsgRate = new MutableDouble(-1); - MutableDouble maxThroughputRate = new MutableDouble(-1); + MutableDouble maxThroughput = new MutableDouble(-1); MutableDouble minMsgRate = new MutableDouble(Integer.MAX_VALUE); - MutableDouble minThroughputRate = new MutableDouble(Integer.MAX_VALUE); + MutableDouble minThroughput = new MutableDouble(Integer.MAX_VALUE); + brokersData.forEach((broker, data) -> { double msgRate = data.getLocalData().getMsgRateIn() + data.getLocalData().getMsgRateOut(); double throughputRate = data.getLocalData().getMsgThroughputIn() + data.getLocalData().getMsgThroughputOut(); - if (msgRate > maxMsgRate.getValue() || throughputRate > maxThroughputRate.getValue()) { - overloadedBroker.setValue(broker); + if (msgRate > maxMsgRate.getValue()) { + msgRateOverloadedBroker.setValue(broker); maxMsgRate.setValue(msgRate); - maxThroughputRate.setValue(throughputRate); } - if (msgRate < minMsgRate.getValue() || throughputRate < minThroughputRate.getValue()) { - underloadedBroker.setValue(broker); + + if (throughputRate > maxThroughput.getValue()) { + msgThroughputOverloadedBroker.setValue(broker); + maxThroughput.setValue(throughputRate); + } + + if (msgRate < minMsgRate.getValue()) { + msgRateUnderloadedBroker.setValue(broker); minMsgRate.setValue(msgRate); - minThroughputRate.setValue(throughputRate); + } + + if (throughputRate < 
minThroughput.getValue()) { + msgThroughputUnderloadedBroker.setValue(broker); + minThroughput.setValue(throughputRate); } }); @@ -91,12 +103,12 @@ public Multimap findBundlesForUnloading(final LoadData loadData, if (minMsgRate.getValue() <= EPS && minMsgRate.getValue() >= -EPS) { minMsgRate.setValue(1.0); } - if (minThroughputRate.getValue() <= EPS && minThroughputRate.getValue() >= -EPS) { - minThroughputRate.setValue(1.0); + if (minThroughput.getValue() <= EPS && minThroughput.getValue() >= -EPS) { + minThroughput.setValue(1.0); } double msgRateDifferencePercentage = ((maxMsgRate.getValue() - minMsgRate.getValue()) * 100) / (minMsgRate.getValue()); - double msgThroughputDifferenceRate = maxThroughputRate.getValue() / minThroughputRate.getValue(); + double msgThroughputDifferenceRate = maxThroughput.getValue() / minThroughput.getValue(); // if the threshold matches then find out how much load needs to be unloaded by considering number of msgRate // and throughput. @@ -105,66 +117,91 @@ public Multimap findBundlesForUnloading(final LoadData loadData, boolean isMsgThroughputThresholdExceeded = conf .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold() > 0 && msgThroughputDifferenceRate > conf - .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(); + .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(); if (isMsgRateThresholdExceeded || isMsgThroughputThresholdExceeded) { - if (log.isDebugEnabled()) { - log.debug( - "Found bundles for uniform load balancing. 
" - + "overloaded broker {} with (msgRate,throughput)= ({},{}) " - + "and underloaded broker {} with (msgRate,throughput)= ({},{})", - overloadedBroker.getValue(), maxMsgRate.getValue(), maxThroughputRate.getValue(), - underloadedBroker.getValue(), minMsgRate.getValue(), minThroughputRate.getValue()); - } MutableInt msgRateRequiredFromUnloadedBundles = new MutableInt( (int) ((maxMsgRate.getValue() - minMsgRate.getValue()) * conf.getMaxUnloadPercentage())); MutableInt msgThroughputRequiredFromUnloadedBundles = new MutableInt( - (int) ((maxThroughputRate.getValue() - minThroughputRate.getValue()) + (int) ((maxThroughput.getValue() - minThroughput.getValue()) * conf.getMaxUnloadPercentage())); - LocalBrokerData overloadedBrokerData = brokersData.get(overloadedBroker.getValue()).getLocalData(); - - if (overloadedBrokerData.getBundles().size() > 1 - && (msgRateRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessage() - || msgThroughputRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessageThroughput())) { - // Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with - // under-loaded broker - loadBundleData.entrySet().stream() - .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey())) - .map((e) -> { - String bundle = e.getKey(); - BundleData bundleData = e.getValue(); - TimeAverageMessageData shortTermData = bundleData.getShortTermData(); - double throughput = isMsgRateThresholdExceeded - ? 
shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut() - : shortTermData.getMsgThroughputIn() + shortTermData.getMsgThroughputOut(); - return Triple.of(bundle, bundleData, throughput); - }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft())) - .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> { - if (conf.getMaxUnloadBundleNumPerShedding() != -1 - && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) { - return; - } - String bundle = e.getLeft(); - BundleData bundleData = e.getMiddle(); - TimeAverageMessageData shortTermData = bundleData.getShortTermData(); - double throughput = shortTermData.getMsgThroughputIn() - + shortTermData.getMsgThroughputOut(); - double bundleMsgRate = shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut(); - if (isMsgRateThresholdExceeded) { + if (isMsgRateThresholdExceeded) { + if (log.isDebugEnabled()) { + log.debug("Found bundles for uniform load balancing. " + + "msgRate overloaded broker: {} with msgRate: {}, " + + "msgRate underloaded broker: {} with msgRate: {}", + msgRateOverloadedBroker.getValue(), maxMsgRate.getValue(), + msgRateUnderloadedBroker.getValue(), minMsgRate.getValue()); + } + LocalBrokerData overloadedBrokerData = + brokersData.get(msgRateOverloadedBroker.getValue()).getLocalData(); + if (overloadedBrokerData.getBundles().size() > 1 + && (msgRateRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessage())) { + // Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with + // under-loaded broker + loadBundleData.entrySet().stream() + .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey())) + .map((e) -> { + String bundle = e.getKey(); + TimeAverageMessageData shortTermData = e.getValue().getShortTermData(); + double msgRate = shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut(); + return Pair.of(bundle, msgRate); + }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft())) 
+ .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> { + if (conf.getMaxUnloadBundleNumPerShedding() != -1 + && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) { + return; + } + String bundle = e.getLeft(); + double bundleMsgRate = e.getRight(); if (bundleMsgRate <= (msgRateRequiredFromUnloadedBundles.getValue() + 1000/* delta */)) { log.info("Found bundle to unload with msgRate {}", bundleMsgRate); msgRateRequiredFromUnloadedBundles.add(-bundleMsgRate); - selectedBundlesCache.put(overloadedBroker.getValue(), bundle); + selectedBundlesCache.put(msgRateOverloadedBroker.getValue(), bundle); } - } else { - if (throughput <= (msgThroughputRequiredFromUnloadedBundles.getValue())) { - log.info("Found bundle to unload with throughput {}", throughput); - msgThroughputRequiredFromUnloadedBundles.add(-throughput); - selectedBundlesCache.put(overloadedBroker.getValue(), bundle); + }); + } + } else { + if (log.isDebugEnabled()) { + log.debug("Found bundles for uniform load balancing. 
" + + "msgThroughput overloaded broker: {} with msgThroughput {}, " + + "msgThroughput underloaded broker: {} with msgThroughput: {}", + msgThroughputOverloadedBroker.getValue(), maxThroughput.getValue(), + msgThroughputUnderloadedBroker.getValue(), minThroughput.getValue()); + } + LocalBrokerData overloadedBrokerData = + brokersData.get(msgThroughputOverloadedBroker.getValue()).getLocalData(); + if (overloadedBrokerData.getBundles().size() > 1 + && + msgThroughputRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessageThroughput()) { + // Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with + // under-loaded broker + loadBundleData.entrySet().stream() + .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey())) + .map((e) -> { + String bundle = e.getKey(); + TimeAverageMessageData shortTermData = e.getValue().getShortTermData(); + double msgThroughput = shortTermData.getMsgThroughputIn() + + shortTermData.getMsgThroughputOut(); + return Pair.of(bundle, msgThroughput); + }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft())) + .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> { + if (conf.getMaxUnloadBundleNumPerShedding() != -1 + && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) { + return; } - } - }); + String bundle = e.getLeft(); + double msgThroughput = e.getRight(); + if (msgThroughput <= (msgThroughputRequiredFromUnloadedBundles.getValue() + + 1000/* delta */)) { + log.info("Found bundle to unload with msgThroughput {}", msgThroughput); + msgThroughputRequiredFromUnloadedBundles.add(-msgThroughput); + selectedBundlesCache.put(msgThroughputOverloadedBroker.getValue(), bundle); + } + }); + } + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java index bd70201cba55d..7b2c777414884 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java @@ -26,11 +26,11 @@ import java.net.URISyntaxException; import java.util.Optional; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; import javax.ws.rs.Encoded; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; @@ -48,6 +48,7 @@ import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.metadata.api.MetadataStoreException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,7 +57,7 @@ public class TopicLookupBase extends PulsarWebResource { private static final String LOOKUP_PATH_V1 = "/lookup/v2/destination/"; private static final String LOOKUP_PATH_V2 = "/lookup/v2/topic/"; - protected CompletableFuture internalLookupTopicAsync(TopicName topicName, boolean authoritative, + protected CompletableFuture internalLookupTopicAsync(final TopicName topicName, boolean authoritative, String listenerName) { if (!pulsar().getBrokerService().getLookupRequestSemaphore().tryAcquire()) { log.warn("No broker was found available for topic {}", topicName); @@ -79,7 +80,8 @@ protected CompletableFuture internalLookupTopicAsync(TopicName topic }) .thenCompose(exist -> { if (!exist) { - throw new RestException(Response.Status.NOT_FOUND, "Topic not found."); + throw new RestException(Response.Status.NOT_FOUND, + String.format("Topic not found %s", topicName.toString())); } CompletableFuture> lookupFuture = pulsar().getNamespaceService() 
.getBrokerServiceUrlAsync(topicName, @@ -131,10 +133,10 @@ protected CompletableFuture internalLookupTopicAsync(TopicName topic pulsar().getBrokerService().getLookupRequestSemaphore().release(); return result.getLookupData(); } - }).exceptionally(ex->{ - pulsar().getBrokerService().getLookupRequestSemaphore().release(); - throw FutureUtil.wrapToCompletionException(ex); }); + }).exceptionally(ex -> { + pulsar().getBrokerService().getLookupRequestSemaphore().release(); + throw FutureUtil.wrapToCompletionException(ex); }); } @@ -318,35 +320,40 @@ public static CompletableFuture lookupTopicAsync(PulsarService pulsarSe requestId, shouldRedirectThroughServiceUrl(conf, lookupData))); } }).exceptionally(ex -> { - if (ex instanceof CompletionException && ex.getCause() instanceof IllegalStateException) { - log.info("Failed to lookup {} for topic {} with error {}", clientAppId, - topicName.toString(), ex.getCause().getMessage()); - } else { - log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, - topicName.toString(), ex.getMessage(), ex); - } - lookupfuture.complete( - newLookupErrorResponse(ServerError.ServiceNotReady, ex.getMessage(), requestId)); - return null; - }); + handleLookupError(lookupfuture, topicName.toString(), clientAppId, requestId, ex); + return null; + }); } - }).exceptionally(ex -> { - if (ex instanceof CompletionException && ex.getCause() instanceof IllegalStateException) { - log.info("Failed to lookup {} for topic {} with error {}", clientAppId, topicName.toString(), - ex.getCause().getMessage()); - } else { - log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, topicName.toString(), - ex.getMessage(), ex); - } - - lookupfuture.complete(newLookupErrorResponse(ServerError.ServiceNotReady, ex.getMessage(), requestId)); + handleLookupError(lookupfuture, topicName.toString(), clientAppId, requestId, ex); return null; }); return lookupfuture; } + private static void handleLookupError(CompletableFuture lookupFuture, String 
topicName, String clientAppId, + long requestId, Throwable ex){ + Throwable unwrapEx = FutureUtil.unwrapCompletionException(ex); + final String errorMsg = unwrapEx.getMessage(); + if (unwrapEx instanceof PulsarServerException) { + unwrapEx = FutureUtil.unwrapCompletionException(unwrapEx.getCause()); + } + if (unwrapEx instanceof IllegalStateException) { + // Current broker still hold the bundle's lock, but the bundle is being unloading. + log.info("Failed to lookup {} for topic {} with error {}", clientAppId, topicName, errorMsg); + lookupFuture.complete(newLookupErrorResponse(ServerError.MetadataError, errorMsg, requestId)); + } else if (unwrapEx instanceof MetadataStoreException) { + // Load bundle ownership or acquire lock failed. + // Differ with "IllegalStateException", print warning log. + log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, topicName, errorMsg); + lookupFuture.complete(newLookupErrorResponse(ServerError.MetadataError, errorMsg, requestId)); + } else { + log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, topicName, errorMsg); + lookupFuture.complete(newLookupErrorResponse(ServerError.ServiceNotReady, errorMsg, requestId)); + } + } + protected TopicName getTopicName(String topicDomain, String tenant, String cluster, String namespace, @Encoded String encodedTopic) { String decodedName = Codec.decode(encodedTopic); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java index 585d62c5b1f27..61e045ed304fd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java @@ -133,9 +133,9 @@ public class NamespaceService implements AutoCloseable { public static final Pattern HEARTBEAT_NAMESPACE_PATTERN = Pattern.compile("pulsar/[^/]+/([^:]+:\\d+)"); public 
static final Pattern HEARTBEAT_NAMESPACE_PATTERN_V2 = Pattern.compile("pulsar/([^:]+:\\d+)"); public static final Pattern SLA_NAMESPACE_PATTERN = Pattern.compile(SLA_NAMESPACE_PROPERTY + "/[^/]+/([^:]+:\\d+)"); - public static final String HEARTBEAT_NAMESPACE_FMT = "pulsar/%s/%s:%s"; - public static final String HEARTBEAT_NAMESPACE_FMT_V2 = "pulsar/%s:%s"; - public static final String SLA_NAMESPACE_FMT = SLA_NAMESPACE_PROPERTY + "/%s/%s:%s"; + public static final String HEARTBEAT_NAMESPACE_FMT = "pulsar/%s/%s"; + public static final String HEARTBEAT_NAMESPACE_FMT_V2 = "pulsar/%s"; + public static final String SLA_NAMESPACE_FMT = SLA_NAMESPACE_PROPERTY + "/%s/%s"; private final ConcurrentOpenHashMap namespaceClients; @@ -164,7 +164,7 @@ public class NamespaceService implements AutoCloseable { */ public NamespaceService(PulsarService pulsar) { this.pulsar = pulsar; - host = pulsar.getAdvertisedAddress(); + this.host = pulsar.getAdvertisedAddress(); this.config = pulsar.getConfiguration(); this.loadManager = pulsar.getLoadManager(); this.bundleFactory = new NamespaceBundleFactory(pulsar, Hashing.crc32()); @@ -189,10 +189,10 @@ public CompletableFuture> getBrokerServiceUrlAsync(TopicN CompletableFuture> future = getBundleAsync(topic) .thenCompose(bundle -> { // Do redirection if the cluster is in rollback or deploying. 
- return redirectManager.findRedirectLookupResultAsync().thenCompose(optResult -> { + return findRedirectLookupResultAsync(bundle).thenCompose(optResult -> { if (optResult.isPresent()) { LOG.info("[{}] Redirect lookup request to {} for topic {}", - pulsar.getSafeWebServiceAddress(), optResult.get(), topic); + pulsar.getBrokerId(), optResult.get(), topic); return CompletableFuture.completedFuture(optResult); } if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { @@ -221,6 +221,13 @@ public CompletableFuture> getBrokerServiceUrlAsync(TopicN return future; } + private CompletableFuture> findRedirectLookupResultAsync(ServiceUnitId bundle) { + if (isSLAOrHeartbeatNamespace(bundle.getNamespaceObject().toString())) { + return CompletableFuture.completedFuture(Optional.empty()); + } + return redirectManager.findRedirectLookupResultAsync(); + } + public CompletableFuture getBundleAsync(TopicName topic) { return bundleFactory.getBundlesAsync(topic.getNamespaceObject()) .thenApply(bundles -> bundles.findBundle(topic)); @@ -288,11 +295,10 @@ public Optional getWebServiceUrl(ServiceUnitId suName, LookupOptions option private CompletableFuture> internalGetWebServiceUrl(@Nullable ServiceUnitId topic, NamespaceBundle bundle, LookupOptions options) { - - return redirectManager.findRedirectLookupResultAsync().thenCompose(optResult -> { + return findRedirectLookupResultAsync(bundle).thenCompose(optResult -> { if (optResult.isPresent()) { LOG.info("[{}] Redirect lookup request to {} for topic {}", - pulsar.getSafeWebServiceAddress(), optResult.get(), topic); + pulsar.getBrokerId(), optResult.get(), topic); try { LookupData lookupData = optResult.get().getLookupData(); final String redirectUrl = options.isRequestHttps() @@ -332,15 +338,17 @@ private CompletableFuture> internalGetWebServiceUrl(@Nullable Serv * @throws PulsarServerException if an unexpected error occurs */ public void registerBootstrapNamespaces() throws PulsarServerException { - + String brokerId = 
pulsar.getBrokerId(); // ensure that we own the heartbeat namespace - if (registerNamespace(getHeartbeatNamespace(host, config), true)) { - LOG.info("added heartbeat namespace name in local cache: ns={}", getHeartbeatNamespace(host, config)); + if (registerNamespace(getHeartbeatNamespace(brokerId, config), true)) { + LOG.info("added heartbeat namespace name in local cache: ns={}", + getHeartbeatNamespace(brokerId, config)); } // ensure that we own the heartbeat namespace - if (registerNamespace(getHeartbeatNamespaceV2(host, config), true)) { - LOG.info("added heartbeat namespace name in local cache: ns={}", getHeartbeatNamespaceV2(host, config)); + if (registerNamespace(getHeartbeatNamespaceV2(brokerId, config), true)) { + LOG.info("added heartbeat namespace name in local cache: ns={}", + getHeartbeatNamespaceV2(brokerId, config)); } // we may not need strict ownership checking for bootstrap names for now @@ -498,7 +506,6 @@ private void searchForCandidateBroker(NamespaceBundle bundle, return; } String candidateBroker; - String candidateBrokerAdvertisedAddr = null; LeaderElectionService les = pulsar.getLeaderElectionService(); if (les == null) { @@ -533,14 +540,14 @@ private void searchForCandidateBroker(NamespaceBundle bundle, if (options.isAuthoritative()) { // leader broker already assigned the current broker as owner - candidateBroker = pulsar.getSafeWebServiceAddress(); + candidateBroker = pulsar.getBrokerId(); } else { LoadManager loadManager = this.loadManager.get(); boolean makeLoadManagerDecisionOnThisBroker = !loadManager.isCentralized() || les.isLeader(); if (!makeLoadManagerDecisionOnThisBroker) { // If leader is not active, fallback to pick the least loaded from current broker loadmanager boolean leaderBrokerActive = currentLeader.isPresent() - && isBrokerActive(currentLeader.get().getServiceUrl()); + && isBrokerActive(currentLeader.get().getBrokerId()); if (!leaderBrokerActive) { makeLoadManagerDecisionOnThisBroker = true; if (currentLeader.isEmpty()) 
{ @@ -559,7 +566,7 @@ private void searchForCandidateBroker(NamespaceBundle bundle, } } if (makeLoadManagerDecisionOnThisBroker) { - Optional> availableBroker = getLeastLoadedFromLoadManager(bundle); + Optional availableBroker = getLeastLoadedFromLoadManager(bundle); if (availableBroker.isEmpty()) { LOG.warn("Load manager didn't return any available broker. " + "Returning empty result to lookup. NamespaceBundle[{}]", @@ -567,12 +574,11 @@ private void searchForCandidateBroker(NamespaceBundle bundle, lookupFuture.complete(Optional.empty()); return; } - candidateBroker = availableBroker.get().getLeft(); - candidateBrokerAdvertisedAddr = availableBroker.get().getRight(); + candidateBroker = availableBroker.get(); authoritativeRedirect = true; } else { // forward to leader broker to make assignment - candidateBroker = currentLeader.get().getServiceUrl(); + candidateBroker = currentLeader.get().getBrokerId(); } } } @@ -585,7 +591,7 @@ private void searchForCandidateBroker(NamespaceBundle bundle, try { Objects.requireNonNull(candidateBroker); - if (candidateBroker.equals(pulsar.getSafeWebServiceAddress())) { + if (candidateBroker.equals(pulsar.getBrokerId())) { // Load manager decided that the local broker should try to become the owner ownershipCache.tryAcquiringOwnership(bundle).thenAccept(ownerInfo -> { if (ownerInfo.isDisabled()) { @@ -636,8 +642,7 @@ private void searchForCandidateBroker(NamespaceBundle bundle, } // Now setting the redirect url - createLookupResult(candidateBrokerAdvertisedAddr == null ? 
candidateBroker - : candidateBrokerAdvertisedAddr, authoritativeRedirect, options.getAdvertisedListenerName()) + createLookupResult(candidateBroker, authoritativeRedirect, options.getAdvertisedListenerName()) .thenAccept(lookupResult -> lookupFuture.complete(Optional.of(lookupResult))) .exceptionally(ex -> { lookupFuture.completeExceptionally(ex); @@ -657,7 +662,7 @@ public CompletableFuture createLookupResult(String candidateBroker CompletableFuture lookupFuture = new CompletableFuture<>(); try { checkArgument(StringUtils.isNotBlank(candidateBroker), "Lookup broker can't be null %s", candidateBroker); - String path = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + parseHostAndPort(candidateBroker); + String path = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + candidateBroker; localBrokerDataCache.get(path).thenAccept(reportData -> { if (reportData.isPresent()) { @@ -693,30 +698,20 @@ public CompletableFuture createLookupResult(String candidateBroker return lookupFuture; } - private boolean isBrokerActive(String candidateBroker) { - String candidateBrokerHostAndPort = parseHostAndPort(candidateBroker); + public boolean isBrokerActive(String candidateBroker) { Set availableBrokers = getAvailableBrokers(); - if (availableBrokers.contains(candidateBrokerHostAndPort)) { + if (availableBrokers.contains(candidateBroker)) { if (LOG.isDebugEnabled()) { - LOG.debug("Broker {} ({}) is available for.", candidateBroker, candidateBrokerHostAndPort); + LOG.debug("Broker {} is available for.", candidateBroker); } return true; } else { - LOG.warn("Broker {} ({}) couldn't be found in available brokers {}", - candidateBroker, candidateBrokerHostAndPort, - String.join(",", availableBrokers)); + LOG.warn("Broker {} couldn't be found in available brokers {}", + candidateBroker, String.join(",", availableBrokers)); return false; } } - private static String parseHostAndPort(String candidateBroker) { - int uriSeparatorPos = candidateBroker.indexOf("://"); - if (uriSeparatorPos == -1) { - 
throw new IllegalArgumentException("'" + candidateBroker + "' isn't an URI."); - } - return candidateBroker.substring(uriSeparatorPos + 3); - } - private Set getAvailableBrokers() { try { return loadManager.get().getAvailableBrokers(); @@ -732,7 +727,7 @@ private Set getAvailableBrokers() { * @return the least loaded broker addresses * @throws Exception if an error occurs */ - private Optional> getLeastLoadedFromLoadManager(ServiceUnitId serviceUnit) throws Exception { + private Optional getLeastLoadedFromLoadManager(ServiceUnitId serviceUnit) throws Exception { Optional leastLoadedBroker = loadManager.get().getLeastLoaded(serviceUnit); if (leastLoadedBroker.isEmpty()) { LOG.warn("No broker is available for {}", serviceUnit); @@ -740,15 +735,13 @@ private Optional> getLeastLoadedFromLoadManager(ServiceUnit } String lookupAddress = leastLoadedBroker.get().getResourceId(); - String advertisedAddr = (String) leastLoadedBroker.get() - .getProperty(ResourceUnit.PROPERTY_KEY_BROKER_ZNODE_NAME); if (LOG.isDebugEnabled()) { LOG.debug("{} : redirecting to the least loaded broker, lookup address={}", - pulsar.getSafeWebServiceAddress(), + pulsar.getBrokerId(), lookupAddress); } - return Optional.of(Pair.of(lookupAddress, advertisedAddr)); + return Optional.of(lookupAddress); } public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle) { @@ -1441,6 +1434,9 @@ private CompletableFuture> getPartitionsForTopic(TopicName topicNam }); } + /*** + * List persistent topics names under a namespace, the topic name contains the partition suffix. 
+ */ public CompletableFuture> getListOfPersistentTopics(NamespaceName namespaceName) { return pulsar.getPulsarResources().getTopicResources().listPersistentTopicsAsync(namespaceName); } @@ -1550,10 +1546,6 @@ public boolean checkOwnershipPresent(NamespaceBundle bundle) throws Exception { public CompletableFuture checkOwnershipPresentAsync(NamespaceBundle bundle) { if (ExtensibleLoadManagerImpl.isLoadManagerExtensionEnabled(config)) { - if (bundle.getNamespaceObject().equals(SYSTEM_NAMESPACE)) { - return FutureUtil.failedFuture(new UnsupportedOperationException( - "Ownership check for system namespace is not supported")); - } ExtensibleLoadManagerImpl extensibleLoadManager = ExtensibleLoadManagerImpl.get(loadManager.get()); return extensibleLoadManager.getOwnershipAsync(Optional.empty(), bundle) .thenApply(Optional::isPresent); @@ -1562,7 +1554,7 @@ public CompletableFuture checkOwnershipPresentAsync(NamespaceBundle bun } public void unloadSLANamespace() throws Exception { - NamespaceName namespaceName = getSLAMonitorNamespace(host, config); + NamespaceName namespaceName = getSLAMonitorNamespace(pulsar.getBrokerId(), config); LOG.info("Checking owner for SLA namespace {}", namespaceName); @@ -1579,41 +1571,23 @@ public void unloadSLANamespace() throws Exception { LOG.info("Namespace {} unloaded successfully", namespaceName); } - public static NamespaceName getHeartbeatNamespace(String host, ServiceConfiguration config) { - Integer port = null; - if (config.getWebServicePort().isPresent()) { - port = config.getWebServicePort().get(); - } else if (config.getWebServicePortTls().isPresent()) { - port = config.getWebServicePortTls().get(); - } - return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT, config.getClusterName(), host, port)); + public static NamespaceName getHeartbeatNamespace(String lookupBroker, ServiceConfiguration config) { + return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT, config.getClusterName(), lookupBroker)); } - public 
static NamespaceName getHeartbeatNamespaceV2(String host, ServiceConfiguration config) { - Integer port = null; - if (config.getWebServicePort().isPresent()) { - port = config.getWebServicePort().get(); - } else if (config.getWebServicePortTls().isPresent()) { - port = config.getWebServicePortTls().get(); - } - return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT_V2, host, port)); + public static NamespaceName getHeartbeatNamespaceV2(String lookupBroker, ServiceConfiguration config) { + return NamespaceName.get(String.format(HEARTBEAT_NAMESPACE_FMT_V2, lookupBroker)); } - public static NamespaceName getSLAMonitorNamespace(String host, ServiceConfiguration config) { - Integer port = null; - if (config.getWebServicePort().isPresent()) { - port = config.getWebServicePort().get(); - } else if (config.getWebServicePortTls().isPresent()) { - port = config.getWebServicePortTls().get(); - } - return NamespaceName.get(String.format(SLA_NAMESPACE_FMT, config.getClusterName(), host, port)); + public static NamespaceName getSLAMonitorNamespace(String lookupBroker, ServiceConfiguration config) { + return NamespaceName.get(String.format(SLA_NAMESPACE_FMT, config.getClusterName(), lookupBroker)); } public static String checkHeartbeatNamespace(ServiceUnitId ns) { Matcher m = HEARTBEAT_NAMESPACE_PATTERN.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { LOG.debug("Heartbeat namespace matched the lookup namespace {}", ns.getNamespaceObject().toString()); - return String.format("http://%s", m.group(1)); + return m.group(1); } else { return null; } @@ -1623,7 +1597,7 @@ public static String checkHeartbeatNamespaceV2(ServiceUnitId ns) { Matcher m = HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { LOG.debug("Heartbeat namespace v2 matched the lookup namespace {}", ns.getNamespaceObject().toString()); - return String.format("http://%s", m.group(1)); + return m.group(1); } else { return null; } @@ -1632,7 +1606,7 @@ 
public static String checkHeartbeatNamespaceV2(ServiceUnitId ns) { public static String getSLAMonitorBrokerName(ServiceUnitId ns) { Matcher m = SLA_NAMESPACE_PATTERN.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { - return String.format("http://%s", m.group(1)); + return m.group(1); } else { return null; } @@ -1650,7 +1624,7 @@ public static boolean isSystemServiceNamespace(String namespace) { * @param namespace the namespace name * @return True if namespace is HEARTBEAT_NAMESPACE or SLA_NAMESPACE */ - public static boolean filterNamespaceForShedding(String namespace) { + public static boolean isSLAOrHeartbeatNamespace(String namespace) { return SLA_NAMESPACE_PATTERN.matcher(namespace).matches() || HEARTBEAT_NAMESPACE_PATTERN.matcher(namespace).matches() || HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(namespace).matches(); @@ -1663,14 +1637,16 @@ public static boolean isHeartbeatNamespace(ServiceUnitId ns) { } public boolean registerSLANamespace() throws PulsarServerException { - boolean isNameSpaceRegistered = registerNamespace(getSLAMonitorNamespace(host, config), false); + String brokerId = pulsar.getBrokerId(); + boolean isNameSpaceRegistered = registerNamespace(getSLAMonitorNamespace(brokerId, config), false); if (isNameSpaceRegistered) { if (LOG.isDebugEnabled()) { LOG.debug("Added SLA Monitoring namespace name in local cache: ns={}", - getSLAMonitorNamespace(host, config)); + getSLAMonitorNamespace(brokerId, config)); } } else if (LOG.isDebugEnabled()) { - LOG.debug("SLA Monitoring not owned by the broker: ns={}", getSLAMonitorNamespace(host, config)); + LOG.debug("SLA Monitoring not owned by the broker: ns={}", + getSLAMonitorNamespace(brokerId, config)); } return isNameSpaceRegistered; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java index 86003153714cb..0033abf36c78c 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java @@ -122,10 +122,10 @@ public OwnershipCache(PulsarService pulsar, NamespaceBundleFactory bundleFactory this.ownerBrokerUrl = pulsar.getBrokerServiceUrl(); this.ownerBrokerUrlTls = pulsar.getBrokerServiceUrlTls(); this.selfOwnerInfo = new NamespaceEphemeralData(ownerBrokerUrl, ownerBrokerUrlTls, - pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), false, pulsar.getAdvertisedListeners()); this.selfOwnerInfoDisabled = new NamespaceEphemeralData(ownerBrokerUrl, ownerBrokerUrlTls, - pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), + pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), true, pulsar.getAdvertisedListeners()); this.lockManager = pulsar.getCoordinationService().getLockManager(NamespaceEphemeralData.class); this.locallyAcquiredLocks = new ConcurrentHashMap<>(); @@ -336,7 +336,7 @@ public Map> getLocallyAcqu public synchronized boolean refreshSelfOwnerInfo() { this.selfOwnerInfo = new NamespaceEphemeralData(pulsar.getBrokerServiceUrl(), - pulsar.getBrokerServiceUrlTls(), pulsar.getSafeWebServiceAddress(), + pulsar.getBrokerServiceUrlTls(), pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(), false, pulsar.getAdvertisedListeners()); return selfOwnerInfo.getNativeUrl() != null || selfOwnerInfo.getNativeUrlTls() != null; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java index c86aac5316fb9..432aa29798ebd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/ServiceUnitUtils.java @@ -36,7 +36,7 @@ public final class 
ServiceUnitUtils { */ private static final String OWNER_INFO_ROOT = "/namespace"; - static String path(NamespaceBundle suname) { + public static String path(NamespaceBundle suname) { // The ephemeral node path for new namespaces should always have bundle name appended return OWNER_INFO_ROOT + "/" + suname.toString(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java index 42a82b2de762b..4059ccf5f26eb 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlers.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -51,6 +52,9 @@ public class ProtocolHandlers implements AutoCloseable { * @return the collection of protocol handlers */ public static ProtocolHandlers load(ServiceConfiguration conf) throws IOException { + if (conf.getMessagingProtocols().isEmpty()) { + return new ProtocolHandlers(Collections.emptyMap()); + } ProtocolHandlerDefinitions definitions = ProtocolHandlerUtils.searchForHandlers( conf.getProtocolHandlerDirectory(), conf.getNarExtractionDirectory()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java index 85e00bb2f87dc..0377ec86488d4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupPublishLimiter.java @@ -135,7 +135,7 @@ public void update(long publishRateInMsgs, long publishRateInBytes) { public boolean tryAcquire(int 
numbers, long bytes) { return (publishRateLimiterOnMessage == null || publishRateLimiterOnMessage.tryAcquire(numbers)) - && (publishRateLimiterOnByte == null || publishRateLimiterOnByte.tryAcquire(bytes)); + && (publishRateLimiterOnByte == null || publishRateLimiterOnByte.tryAcquire(bytes)); } public void registerRateLimitFunction(String name, RateLimitFunction func) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java index 5ab81fcbbff27..20c35b4f7769c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java @@ -54,6 +54,7 @@ import org.apache.avro.io.DecoderFactory; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.admin.impl.PersistentTopicsBase; import org.apache.pulsar.broker.authentication.AuthenticationParameters; @@ -433,8 +434,10 @@ private CompletableFuture lookUpBrokerForTopic(TopicName partitionedTopicN } LookupResult result = optionalResult.get(); - if (result.getLookupData().getHttpUrl().equals(pulsar().getWebServiceAddress()) - || result.getLookupData().getHttpUrlTls().equals(pulsar().getWebServiceAddressTls())) { + String httpUrl = result.getLookupData().getHttpUrl(); + String httpUrlTls = result.getLookupData().getHttpUrlTls(); + if ((StringUtils.isNotBlank(httpUrl) && httpUrl.equals(pulsar().getWebServiceAddress())) + || (StringUtils.isNotBlank(httpUrlTls) && httpUrlTls.equals(pulsar().getWebServiceAddressTls()))) { // Current broker owns the topic, add to owning topic. 
if (log.isDebugEnabled()) { log.debug("Complete topic look up for rest produce message request for topic {}, " @@ -455,12 +458,10 @@ private CompletableFuture lookUpBrokerForTopic(TopicName partitionedTopicN } if (result.isRedirect()) { // Redirect lookup. - completeLookup(Pair.of(Arrays.asList(result.getLookupData().getHttpUrl(), - result.getLookupData().getHttpUrlTls()), false), redirectAddresses, future); + completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), false), redirectAddresses, future); } else { // Found owner for topic. - completeLookup(Pair.of(Arrays.asList(result.getLookupData().getHttpUrl(), - result.getLookupData().getHttpUrlTls()), true), redirectAddresses, future); + completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), true), redirectAddresses, future); } } }).exceptionally(exception -> { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java index 50f05f80e3f8b..0497a72acc64d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java @@ -49,6 +49,7 @@ import org.apache.pulsar.common.api.proto.ReplicatedSubscriptionsSnapshot; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.Markers; +import org.apache.pulsar.compaction.Compactor; import org.checkerframework.checker.nullness.qual.Nullable; @Slf4j @@ -174,34 +175,46 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i if (msgMetadata != null && msgMetadata.hasTxnidMostBits() && msgMetadata.hasTxnidLeastBits()) { if (Markers.isTxnMarker(msgMetadata)) { - // because consumer can receive message is smaller than maxReadPosition, - // so this marker is useless for this subscription - individualAcknowledgeMessageIfNeeded(entry.getPosition(), 
Collections.emptyMap()); - entries.set(i, null); - entry.release(); - continue; + if (cursor == null || !cursor.getName().equals(Compactor.COMPACTION_SUBSCRIPTION)) { + // because consumer can receive message is smaller than maxReadPosition, + // so this marker is useless for this subscription + individualAcknowledgeMessageIfNeeded(Collections.singletonList(entry.getPosition()), + Collections.emptyMap()); + entries.set(i, null); + entry.release(); + continue; + } } else if (((PersistentTopic) subscription.getTopic()) .isTxnAborted(new TxnID(msgMetadata.getTxnidMostBits(), msgMetadata.getTxnidLeastBits()), (PositionImpl) entry.getPosition())) { - individualAcknowledgeMessageIfNeeded(entry.getPosition(), Collections.emptyMap()); + individualAcknowledgeMessageIfNeeded(Collections.singletonList(entry.getPosition()), + Collections.emptyMap()); entries.set(i, null); entry.release(); continue; } } - if (msgMetadata == null || Markers.isServerOnlyMarker(msgMetadata)) { + if (msgMetadata == null || (Markers.isServerOnlyMarker(msgMetadata))) { PositionImpl pos = (PositionImpl) entry.getPosition(); // Message metadata was corrupted or the messages was a server-only marker if (Markers.isReplicatedSubscriptionSnapshotMarker(msgMetadata)) { + final int readerIndex = metadataAndPayload.readerIndex(); processReplicatedSubscriptionSnapshot(pos, metadataAndPayload); + metadataAndPayload.readerIndex(readerIndex); } - entries.set(i, null); - entry.release(); - individualAcknowledgeMessageIfNeeded(pos, Collections.emptyMap()); - continue; + // Deliver marker to __compaction cursor to avoid compaction task stuck, + // and filter out them when doing topic compaction. 
+ if (msgMetadata == null || cursor == null + || !cursor.getName().equals(Compactor.COMPACTION_SUBSCRIPTION)) { + entries.set(i, null); + entry.release(); + individualAcknowledgeMessageIfNeeded(Collections.singletonList(pos), + Collections.emptyMap()); + continue; + } } else if (trackDelayedDelivery(entry.getLedgerId(), entry.getEntryId(), msgMetadata)) { // The message is marked for delayed delivery. Ignore for now. entries.set(i, null); @@ -213,12 +226,7 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i this.filterAcceptedMsgs.add(entryMsgCnt); } - totalEntries++; int batchSize = msgMetadata.getNumMessagesInBatch(); - totalMessages += batchSize; - totalBytes += metadataAndPayload.readableBytes(); - totalChunkedMessages += msgMetadata.hasChunkId() ? 1 : 0; - batchSizes.setBatchSize(i, batchSize); long[] ackSet = null; if (indexesAcks != null && cursor != null) { PositionImpl position = PositionImpl.get(entry.getLedgerId(), entry.getEntryId()); @@ -262,6 +270,12 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i } } + totalEntries++; + totalMessages += batchSize; + totalBytes += metadataAndPayload.readableBytes(); + totalChunkedMessages += msgMetadata.hasChunkId() ? 
1 : 0; + batchSizes.setBatchSize(i, batchSize); + BrokerInterceptor interceptor = subscription.interceptor(); if (null != interceptor) { // keep for compatibility if users has implemented the old interface @@ -270,8 +284,7 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i } } if (CollectionUtils.isNotEmpty(entriesToFiltered)) { - subscription.acknowledgeMessage(entriesToFiltered, AckType.Individual, - Collections.emptyMap()); + individualAcknowledgeMessageIfNeeded(entriesToFiltered, Collections.emptyMap()); int filtered = entriesToFiltered.size(); Topic topic = subscription.getTopic(); @@ -300,9 +313,9 @@ public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i return totalEntries; } - private void individualAcknowledgeMessageIfNeeded(Position position, Map properties) { + private void individualAcknowledgeMessageIfNeeded(List positions, Map properties) { if (!(subscription instanceof PulsarCompactorSubscription)) { - subscription.acknowledgeMessage(Collections.singletonList(position), AckType.Individual, properties); + subscription.acknowledgeMessage(positions, AckType.Individual, properties); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java index 6dd296d16b53b..1b5b2824257b0 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java @@ -194,7 +194,7 @@ protected synchronized CompletableFuture closeProducerAsync() { return CompletableFuture.completedFuture(null); } CompletableFuture future = producer.closeAsync(); - future.thenRun(() -> { + return future.thenRun(() -> { STATE_UPDATER.set(this, State.Stopped); this.producer = null; // deactivate further read @@ -209,7 +209,6 @@ protected synchronized CompletableFuture closeProducerAsync() { 
brokerService.executor().schedule(this::closeProducerAsync, waitTimeMs, TimeUnit.MILLISECONDS); return null; }); - return future; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java index b15f8cbf0b848..d5d7e7ade5078 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java @@ -58,7 +58,6 @@ import org.apache.pulsar.broker.service.BrokerServiceException.TopicMigratedException; import org.apache.pulsar.broker.service.BrokerServiceException.TopicTerminatedException; import org.apache.pulsar.broker.service.plugin.EntryFilter; -import org.apache.pulsar.broker.service.schema.BookkeeperSchemaStorage; import org.apache.pulsar.broker.service.schema.SchemaRegistryService; import org.apache.pulsar.broker.service.schema.exceptions.IncompatibleSchemaException; import org.apache.pulsar.broker.stats.prometheus.metrics.Summary; @@ -674,21 +673,7 @@ private boolean allowAutoUpdateSchema() { @Override public CompletableFuture deleteSchema() { - String id = getSchemaId(); - SchemaRegistryService schemaRegistryService = brokerService.pulsar().getSchemaRegistryService(); - return BookkeeperSchemaStorage.ignoreUnrecoverableBKException(schemaRegistryService.getSchema(id)) - .thenCompose(schema -> { - if (schema != null) { - // It's different from `SchemasResource.deleteSchema` - // because when we delete a topic, the schema - // history is meaningless. But when we delete a schema of a topic, a new schema could be - // registered in the future. 
- log.info("Delete schema storage of id: {}", id); - return schemaRegistryService.deleteSchemaStorage(id); - } else { - return CompletableFuture.completedFuture(null); - } - }); + return brokerService.deleteSchema(TopicName.get(getName())); } @Override @@ -718,15 +703,14 @@ public CompletableFuture> addProducer(Producer producer, log.warn("[{}] Attempting to add producer to a terminated topic", topic); throw new TopicTerminatedException("Topic was already terminated"); } - internalAddProducer(producer); - - USAGE_COUNT_UPDATER.incrementAndGet(this); - if (log.isDebugEnabled()) { - log.debug("[{}] [{}] Added producer -- count: {}", topic, producer.getProducerName(), - USAGE_COUNT_UPDATER.get(this)); - } - - return CompletableFuture.completedFuture(producerEpoch); + return internalAddProducer(producer).thenApply(ignore -> { + USAGE_COUNT_UPDATER.incrementAndGet(this); + if (log.isDebugEnabled()) { + log.debug("[{}] [{}] Added producer -- count: {}", topic, producer.getProducerName(), + USAGE_COUNT_UPDATER.get(this)); + } + return producerEpoch; + }); } catch (BrokerServiceException e) { return FutureUtil.failedFuture(e); } finally { @@ -972,15 +956,17 @@ protected void checkTopicFenced() throws BrokerServiceException { } } - protected void internalAddProducer(Producer producer) throws BrokerServiceException { + protected CompletableFuture internalAddProducer(Producer producer) { if (isProducersExceeded(producer)) { log.warn("[{}] Attempting to add producer to topic which reached max producers limit", topic); - throw new BrokerServiceException.ProducerBusyException("Topic reached max producers limit"); + return CompletableFuture.failedFuture( + new BrokerServiceException.ProducerBusyException("Topic reached max producers limit")); } if (isSameAddressProducersExceeded(producer)) { log.warn("[{}] Attempting to add producer to topic which reached max same address producers limit", topic); - throw new BrokerServiceException.ProducerBusyException("Topic reached max same 
address producers limit"); + return CompletableFuture.failedFuture( + new BrokerServiceException.ProducerBusyException("Topic reached max same address producers limit")); } if (log.isDebugEnabled()) { @@ -989,27 +975,46 @@ protected void internalAddProducer(Producer producer) throws BrokerServiceExcept Producer existProducer = producers.putIfAbsent(producer.getProducerName(), producer); if (existProducer != null) { - tryOverwriteOldProducer(existProducer, producer); + return tryOverwriteOldProducer(existProducer, producer); } else if (!producer.isRemote()) { USER_CREATED_PRODUCER_COUNTER_UPDATER.incrementAndGet(this); } + return CompletableFuture.completedFuture(null); } - private void tryOverwriteOldProducer(Producer oldProducer, Producer newProducer) - throws BrokerServiceException { - if (newProducer.isSuccessorTo(oldProducer) && !isUserProvidedProducerName(oldProducer) - && !isUserProvidedProducerName(newProducer)) { + private CompletableFuture tryOverwriteOldProducer(Producer oldProducer, Producer newProducer) { + if (newProducer.isSuccessorTo(oldProducer)) { oldProducer.close(false); if (!producers.replace(newProducer.getProducerName(), oldProducer, newProducer)) { // Met concurrent update, throw exception here so that client can try reconnect later. - throw new BrokerServiceException.NamingException("Producer with name '" + newProducer.getProducerName() - + "' replace concurrency error"); + return CompletableFuture.failedFuture(new BrokerServiceException.NamingException("Producer with name '" + + newProducer.getProducerName() + "' replace concurrency error")); } else { handleProducerRemoved(oldProducer); + return CompletableFuture.completedFuture(null); } } else { - throw new BrokerServiceException.NamingException( - "Producer with name '" + newProducer.getProducerName() + "' is already connected to topic"); + // If a producer with the same name tries to use a new connection, async check the old connection is + // available. 
The producers related the connection that not available are automatically cleaned up. + if (!Objects.equals(oldProducer.getCnx(), newProducer.getCnx())) { + return oldProducer.getCnx().checkConnectionLiveness().thenCompose(previousIsActive -> { + if (previousIsActive) { + return CompletableFuture.failedFuture(new BrokerServiceException.NamingException( + "Producer with name '" + newProducer.getProducerName() + + "' is already connected to topic")); + } else { + // If the connection of the previous producer is not active, the method + // "cnx().checkConnectionLiveness()" will trigger the close for it and kick off the previous + // producer. So try to add current producer again. + // The recursive call will be stopped by these two case(This prevents infinite call): + // 1. add current producer success. + // 2. once another same name producer registered. + return internalAddProducer(newProducer); + } + }); + } + return CompletableFuture.failedFuture(new BrokerServiceException.NamingException( + "Producer with name '" + newProducer.getProducerName() + "' is already connected to topic")); } } @@ -1281,7 +1286,7 @@ protected boolean isExceedMaximumMessageSize(int size, PublishContext publishCon /** * update topic publish dispatcher for this topic. 
*/ - public void updatePublishDispatcher() { + public void updatePublishRateLimiter() { synchronized (topicPublishRateLimiterLock) { PublishRate publishRate = topicPolicies.getPublishRate().get(); if (publishRate.publishThrottlingRateInByte > 0 || publishRate.publishThrottlingRateInMsg > 0) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java index 6c48e4d9ae89f..e08417a6498c9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java @@ -49,6 +49,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; @@ -70,6 +71,7 @@ import java.util.function.Consumer; import java.util.function.Predicate; import javax.annotation.Nonnull; +import javax.annotation.Nullable; import lombok.AccessLevel; import lombok.AllArgsConstructor; import lombok.Getter; @@ -101,7 +103,7 @@ import org.apache.pulsar.broker.intercept.BrokerInterceptor; import org.apache.pulsar.broker.intercept.ManagedLedgerInterceptorImpl; import org.apache.pulsar.broker.loadbalance.LoadManager; -import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; +import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.resources.DynamicConfigurationResources; import org.apache.pulsar.broker.resources.LocalPoliciesResources; @@ -159,6 +161,7 @@ import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicType; import org.apache.pulsar.common.policies.data.stats.TopicStatsImpl; +import org.apache.pulsar.common.protocol.schema.SchemaVersion; 
import org.apache.pulsar.common.stats.Metrics; import org.apache.pulsar.common.util.FieldParser; import org.apache.pulsar.common.util.FutureUtil; @@ -374,8 +377,8 @@ public BrokerService(PulsarService pulsar, EventLoopGroup eventLoopGroup) throws if (pulsar.getConfiguration().getMaxUnackedMessagesPerBroker() > 0 && pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked() > 0.0) { this.maxUnackedMessages = pulsar.getConfiguration().getMaxUnackedMessagesPerBroker(); - this.maxUnackedMsgsPerDispatcher = (int) ((maxUnackedMessages - * pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked()) / 100); + this.maxUnackedMsgsPerDispatcher = (int) (maxUnackedMessages + * pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked()); log.info("Enabling per-broker unack-message limit {} and dispatcher-limit {} on blocked-broker", maxUnackedMessages, maxUnackedMsgsPerDispatcher); // block misbehaving dispatcher by checking periodically @@ -822,6 +825,7 @@ public CompletableFuture closeAsync() { for (EventLoopGroup group : protocolHandlersWorkerGroups) { shutdownEventLoops.add(shutdownEventLoopGracefully(group)); } + CompletableFuture shutdownFuture = CompletableFuture.allOf(shutdownEventLoops.toArray(new CompletableFuture[0])) .handle((v, t) -> { @@ -832,7 +836,7 @@ public CompletableFuture closeAsync() { } return null; }) - .thenCompose(__ -> { + .thenComposeAsync(__ -> { log.info("Continuing to second phase in shutdown."); List> asyncCloseFutures = new ArrayList<>(); @@ -896,6 +900,12 @@ public CompletableFuture closeAsync() { return null; }); return combined; + }, runnable -> { + // run the 2nd phase of the shutdown in a separate thread + Thread thread = new Thread(runnable); + thread.setName("BrokerService-shutdown-phase2"); + thread.setDaemon(false); + thread.start(); }); FutureUtil.whenCancelledOrTimedOut(shutdownFuture, () -> cancellableDownstreamFutureReference .thenAccept(future -> future.cancel(false))); 
@@ -1041,19 +1051,50 @@ public CompletableFuture> getTopic(final TopicName topicName, bo } final boolean isPersistentTopic = topicName.getDomain().equals(TopicDomain.persistent); if (isPersistentTopic) { - return topics.computeIfAbsent(topicName.toString(), (tpName) -> { - if (topicName.isPartitioned()) { - return fetchPartitionedTopicMetadataAsync(TopicName.get(topicName.getPartitionedTopicName())) - .thenCompose((metadata) -> { - // Allow crate non-partitioned persistent topic that name includes `partition` - if (metadata.partitions == 0 - || topicName.getPartitionIndex() < metadata.partitions) { - return loadOrCreatePersistentTopic(tpName, createIfMissing, properties); - } - return CompletableFuture.completedFuture(Optional.empty()); - }); + return pulsar.getPulsarResources().getTopicResources().persistentTopicExists(topicName) + .thenCompose(exists -> { + if (!exists && !createIfMissing) { + return CompletableFuture.completedFuture(Optional.empty()); } - return loadOrCreatePersistentTopic(tpName, createIfMissing, properties); + return getTopicPoliciesBypassSystemTopic(topicName).exceptionally(ex -> { + final Throwable rc = FutureUtil.unwrapCompletionException(ex); + final String errorInfo = String.format("Topic creation encountered an exception by initialize" + + " topic policies service. 
topic_name=%s error_message=%s", topicName, + rc.getMessage()); + log.error(errorInfo, rc); + throw FutureUtil.wrapToCompletionException(new ServiceUnitNotReadyException(errorInfo)); + }).thenCompose(optionalTopicPolicies -> { + final TopicPolicies topicPolicies = optionalTopicPolicies.orElse(null); + return topics.computeIfAbsent(topicName.toString(), (tpName) -> { + if (topicName.isPartitioned()) { + final TopicName topicNameEntity = TopicName.get(topicName.getPartitionedTopicName()); + return fetchPartitionedTopicMetadataAsync(topicNameEntity) + .thenCompose((metadata) -> { + // Allow crate non-partitioned persistent topic that name includes + // `partition` + if (metadata.partitions == 0 + || topicName.getPartitionIndex() < metadata.partitions) { + return loadOrCreatePersistentTopic(tpName, createIfMissing, + properties, topicPolicies); + } + final String errorMsg = + String.format("Illegal topic partition name %s with max allowed " + + "%d partitions", topicName, metadata.partitions); + log.warn(errorMsg); + return FutureUtil.failedFuture( + new BrokerServiceException.NotAllowedException(errorMsg)); + }); + } + return loadOrCreatePersistentTopic(tpName, createIfMissing, properties, topicPolicies); + }).thenCompose(optionalTopic -> { + if (!optionalTopic.isPresent() && createIfMissing) { + log.warn("[{}] Try to recreate the topic with createIfMissing=true " + + "but the returned topic is empty", topicName); + return getTopic(topicName, createIfMissing, properties); + } + return CompletableFuture.completedFuture(optionalTopic); + }); + }); }); } else { return topics.computeIfAbsent(topicName.toString(), (name) -> { @@ -1107,6 +1148,18 @@ public CompletableFuture> getTopic(final TopicName topicName, bo } } + private CompletableFuture> getTopicPoliciesBypassSystemTopic(@Nonnull TopicName topicName) { + Objects.requireNonNull(topicName); + final ServiceConfiguration serviceConfiguration = pulsar.getConfiguration(); + if (serviceConfiguration.isSystemTopicEnabled() 
&& serviceConfiguration.isTopicLevelPoliciesEnabled() + && !NamespaceService.isSystemServiceNamespace(topicName.getNamespace()) + && !SystemTopicNames.isTopicPoliciesSystemTopic(topicName.toString())) { + return pulsar.getTopicPoliciesService().getTopicPoliciesAsync(topicName); + } else { + return CompletableFuture.completedFuture(Optional.empty()); + } + } + public CompletableFuture deleteTopic(String topic, boolean forceDelete) { topicEventsDispatcher.notify(topic, TopicEvent.DELETE, EventStage.BEFORE); CompletableFuture result = deleteTopicInternal(topic, forceDelete); @@ -1156,26 +1209,33 @@ private CompletableFuture deleteTopicInternal(String topic, boolean forceD CompletableFuture future = new CompletableFuture<>(); CompletableFuture deleteTopicAuthenticationFuture = new CompletableFuture<>(); deleteTopicAuthenticationWithRetry(topic, deleteTopicAuthenticationFuture, 5); - - deleteTopicAuthenticationFuture.whenComplete((v, ex) -> { + deleteTopicAuthenticationFuture + .thenCompose(__ -> deleteSchema(tn)) + .thenCompose(__ -> { + if (!SystemTopicNames.isTopicPoliciesSystemTopic(topic) + && getPulsar().getConfiguration().isSystemTopicEnabled()) { + return deleteTopicPolicies(tn); + } + return CompletableFuture.completedFuture(null); + }).whenComplete((v, ex) -> { if (ex != null) { future.completeExceptionally(ex); return; } CompletableFuture mlConfigFuture = getManagedLedgerConfig(topicName); managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), - mlConfigFuture, new DeleteLedgerCallback() { - @Override - public void deleteLedgerComplete(Object ctx) { - future.complete(null); - } + mlConfigFuture, new DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + future.complete(null); + } - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - future.completeExceptionally(exception); - } - }, null); - }); + @Override + public void deleteLedgerFailed(ManagedLedgerException exception, 
Object ctx) { + future.completeExceptionally(exception); + } + }, null); + }); return future; } @@ -1489,7 +1549,7 @@ public PulsarAdmin getClusterPulsarAdmin(String cluster, Optional c * @throws RuntimeException */ protected CompletableFuture> loadOrCreatePersistentTopic(final String topic, - boolean createIfMissing, Map properties) throws RuntimeException { + boolean createIfMissing, Map properties, @Nullable TopicPolicies topicPolicies) { final CompletableFuture> topicFuture = FutureUtil.createFutureWithTimeout( Duration.ofSeconds(pulsar.getConfiguration().getTopicLoadTimeoutSeconds()), executor(), () -> FAILED_TO_LOAD_TOPIC_TIMEOUT_EXCEPTION); @@ -1507,7 +1567,8 @@ protected CompletableFuture> loadOrCreatePersistentTopic(final S final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get(); if (topicLoadSemaphore.tryAcquire()) { - checkOwnershipAndCreatePersistentTopic(topic, createIfMissing, topicFuture, properties); + checkOwnershipAndCreatePersistentTopic(topic, createIfMissing, topicFuture, + properties, topicPolicies); topicFuture.handle((persistentTopic, ex) -> { // release permit and process pending topic topicLoadSemaphore.release(); @@ -1516,7 +1577,7 @@ protected CompletableFuture> loadOrCreatePersistentTopic(final S }); } else { pendingTopicLoadingQueue.add(new TopicLoadingContext(topic, - createIfMissing, topicFuture, properties)); + createIfMissing, topicFuture, properties, topicPolicies)); if (log.isDebugEnabled()) { log.debug("topic-loading for {} added into pending queue", topic); } @@ -1557,7 +1618,7 @@ protected CompletableFuture> fetchTopicPropertiesAsync(Topic private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean createIfMissing, CompletableFuture> topicFuture, - Map properties) { + Map properties, @Nullable TopicPolicies topicPolicies) { TopicName topicName = TopicName.get(topic); pulsar.getNamespaceService().isServiceUnitActiveAsync(topicName) .thenAccept(isActive -> { @@ -1571,7 +1632,8 @@ private void 
checkOwnershipAndCreatePersistentTopic(final String topic, boolean } propertiesFuture.thenAccept(finalProperties -> //TODO add topicName in properties? - createPersistentTopic(topic, createIfMissing, topicFuture, finalProperties) + createPersistentTopic(topic, createIfMissing, topicFuture, + finalProperties, topicPolicies) ).exceptionally(throwable -> { log.warn("[{}] Read topic property failed", topic, throwable); pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); @@ -1596,12 +1658,12 @@ private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean public void createPersistentTopic0(final String topic, boolean createIfMissing, CompletableFuture> topicFuture, Map properties) { - createPersistentTopic(topic, createIfMissing, topicFuture, properties); + createPersistentTopic(topic, createIfMissing, topicFuture, properties, null); } private void createPersistentTopic(final String topic, boolean createIfMissing, CompletableFuture> topicFuture, - Map properties) { + Map properties, @Nullable TopicPolicies topicPolicies) { TopicName topicName = TopicName.get(topic); final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); @@ -1689,16 +1751,14 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { // Check create persistent topic timeout. 
log.warn("{} future is already completed with failure {}, closing the" + " topic", topic, FutureUtil.getException(topicFuture)); - persistentTopic.getTransactionBuffer() - .closeAsync() - .exceptionally(t -> { - log.error("[{}] Close transactionBuffer failed", topic, t); - return null; - }); - persistentTopic.stopReplProducers() - .whenCompleteAsync((v, exception) -> { - topics.remove(topic, topicFuture); - }, executor()); + executor().submit(() -> { + persistentTopic.close().whenComplete((ignore, ex) -> { + if (ex != null) { + log.warn("[{}] Get an error when closing topic.", + topic, ex); + } + }); + }); } else { addTopicToStatsMaps(topicName, persistentTopic); topicFuture.complete(Optional.of(persistentTopic)); @@ -1707,16 +1767,15 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { .exceptionally((ex) -> { log.warn("Replication or dedup check failed." + " Removing topic from topics list {}, {}", topic, ex); - persistentTopic.getTransactionBuffer() - .closeAsync() - .exceptionally(t -> { - log.error("[{}] Close transactionBuffer failed", topic, t); - return null; - }); - persistentTopic.stopReplProducers().whenCompleteAsync((v, exception) -> { - topics.remove(topic, topicFuture); - topicFuture.completeExceptionally(ex); - }, executor()); + executor().submit(() -> { + persistentTopic.close().whenComplete((ignore, closeEx) -> { + if (closeEx != null) { + log.warn("[{}] Get an error when closing topic.", + topic, closeEx); + } + topicFuture.completeExceptionally(ex); + }); + }); return null; }); } catch (PulsarServerException e) { @@ -1738,7 +1797,7 @@ public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { topicFuture.completeExceptionally(new PersistenceException(exception)); } } - }, () -> isTopicNsOwnedByBroker(topicName), null); + }, () -> isTopicNsOwnedByBrokerAsync(topicName), null); }).exceptionally((exception) -> { log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception); @@ 
-1750,165 +1809,176 @@ public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { }); } - public CompletableFuture getManagedLedgerConfig(TopicName topicName) { + public CompletableFuture getManagedLedgerConfig(@Nonnull TopicName topicName) { + final CompletableFuture> topicPoliciesFuture = + getTopicPoliciesBypassSystemTopic(topicName); + return topicPoliciesFuture.thenCompose(optionalTopicPolicies -> + getManagedLedgerConfig(topicName, optionalTopicPolicies.orElse(null))); + } + + private CompletableFuture getManagedLedgerConfig(@Nonnull TopicName topicName, + @Nullable TopicPolicies topicPolicies) { + requireNonNull(topicName); NamespaceName namespace = topicName.getNamespaceObject(); ServiceConfiguration serviceConfig = pulsar.getConfiguration(); NamespaceResources nsr = pulsar.getPulsarResources().getNamespaceResources(); LocalPoliciesResources lpr = pulsar.getPulsarResources().getLocalPolicies(); - return nsr.getPoliciesAsync(namespace) - .thenCombine(lpr.getLocalPoliciesAsync(namespace), (policies, localPolicies) -> { - PersistencePolicies persistencePolicies = null; - RetentionPolicies retentionPolicies = null; - OffloadPoliciesImpl topicLevelOffloadPolicies = null; - - if (pulsar.getConfig().isTopicLevelPoliciesEnabled() - && !NamespaceService.isSystemServiceNamespace(namespace.toString())) { - final TopicPolicies topicPolicies = pulsar.getTopicPoliciesService() - .getTopicPoliciesIfExists(topicName); - if (topicPolicies != null) { - persistencePolicies = topicPolicies.getPersistence(); - retentionPolicies = topicPolicies.getRetentionPolicies(); - topicLevelOffloadPolicies = topicPolicies.getOffloadPolicies(); - } - } + final CompletableFuture> nsPolicies = nsr.getPoliciesAsync(namespace); + final CompletableFuture> lcPolicies = lpr.getLocalPoliciesAsync(namespace); + return nsPolicies.thenCombine(lcPolicies, (policies, localPolicies) -> { + PersistencePolicies persistencePolicies = null; + RetentionPolicies retentionPolicies = null; + 
OffloadPoliciesImpl topicLevelOffloadPolicies = null; + if (topicPolicies != null) { + persistencePolicies = topicPolicies.getPersistence(); + retentionPolicies = topicPolicies.getRetentionPolicies(); + topicLevelOffloadPolicies = topicPolicies.getOffloadPolicies(); + } - if (persistencePolicies == null) { - persistencePolicies = policies.map(p -> p.persistence).orElseGet( - () -> new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(), - serviceConfig.getManagedLedgerDefaultWriteQuorum(), - serviceConfig.getManagedLedgerDefaultAckQuorum(), - serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit())); - } + if (persistencePolicies == null) { + persistencePolicies = policies.map(p -> p.persistence).orElseGet( + () -> new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(), + serviceConfig.getManagedLedgerDefaultWriteQuorum(), + serviceConfig.getManagedLedgerDefaultAckQuorum(), + serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit())); + } - if (retentionPolicies == null) { - retentionPolicies = policies.map(p -> p.retention_policies).orElseGet( - () -> new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(), - serviceConfig.getDefaultRetentionSizeInMB()) - ); + if (retentionPolicies == null) { + if (SystemTopicNames.isSystemTopic(topicName)) { + if (log.isDebugEnabled()) { + log.debug("{} Disable data retention policy for system topic.", topicName); } + retentionPolicies = new RetentionPolicies(0, 0); + } else { + retentionPolicies = policies.map(p -> p.retention_policies).orElseGet( + () -> new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(), + serviceConfig.getDefaultRetentionSizeInMB()) + ); + } + } - ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); - managedLedgerConfig.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble()); - managedLedgerConfig.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum()); - 
managedLedgerConfig.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum()); - - if (serviceConfig.isStrictBookieAffinityEnabled()) { - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( - IsolatedBookieEnsemblePlacementPolicy.class); - if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, - localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); - properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, - localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } else if (isSystemTopic(topicName)) { - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, "*"); - properties.put(IsolatedBookieEnsemblePlacementPolicy - .SECONDARY_ISOLATION_BOOKIE_GROUPS, "*"); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } else { - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, ""); - properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, ""); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } - } else { - if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( - IsolatedBookieEnsemblePlacementPolicy.class); - Map properties = new HashMap<>(); - properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, - localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); - properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, - 
localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); - managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); - } - } - - managedLedgerConfig.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate()); - managedLedgerConfig.setDigestType(serviceConfig.getManagedLedgerDigestType()); - managedLedgerConfig.setPassword(serviceConfig.getManagedLedgerPassword()); - - managedLedgerConfig - .setMaxUnackedRangesToPersist(serviceConfig.getManagedLedgerMaxUnackedRangesToPersist()); - managedLedgerConfig.setPersistentUnackedRangesWithMultipleEntriesEnabled( - serviceConfig.isPersistentUnackedRangesWithMultipleEntriesEnabled()); - managedLedgerConfig.setMaxUnackedRangesToPersistInMetadataStore( - serviceConfig.getManagedLedgerMaxUnackedRangesToPersistInMetadataStore()); - managedLedgerConfig.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger()); - managedLedgerConfig - .setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(), - TimeUnit.MINUTES); - managedLedgerConfig - .setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(), - TimeUnit.MINUTES); - managedLedgerConfig.setMaxSizePerLedgerMb(serviceConfig.getManagedLedgerMaxSizePerLedgerMbytes()); - - managedLedgerConfig.setMetadataOperationsTimeoutSeconds( - serviceConfig.getManagedLedgerMetadataOperationsTimeoutSeconds()); - managedLedgerConfig - .setReadEntryTimeoutSeconds(serviceConfig.getManagedLedgerReadEntryTimeoutSeconds()); - managedLedgerConfig - .setAddEntryTimeoutSeconds(serviceConfig.getManagedLedgerAddEntryTimeoutSeconds()); - managedLedgerConfig.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize()); - managedLedgerConfig.setUnackedRangesOpenCacheSetEnabled( - serviceConfig.isManagedLedgerUnackedRangesOpenCacheSetEnabled()); - managedLedgerConfig.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum()); - 
managedLedgerConfig.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum()); - managedLedgerConfig - .setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger()); + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + managedLedgerConfig.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble()); + managedLedgerConfig.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum()); + managedLedgerConfig.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum()); + + if (serviceConfig.isStrictBookieAffinityEnabled()) { + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( + IsolatedBookieEnsemblePlacementPolicy.class); + if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) { + Map properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); + properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } else if (isSystemTopic(topicName)) { + Map properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, "*"); + properties.put(IsolatedBookieEnsemblePlacementPolicy + .SECONDARY_ISOLATION_BOOKIE_GROUPS, "*"); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } else { + Map properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, ""); + properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, ""); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } + } else { + if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != 
null) { + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyClassName( + IsolatedBookieEnsemblePlacementPolicy.class); + Map properties = new HashMap<>(); + properties.put(IsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupPrimary()); + properties.put(IsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, + localPolicies.get().bookieAffinityGroup.getBookkeeperAffinityGroupSecondary()); + managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties); + } + } - managedLedgerConfig - .setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds()); - managedLedgerConfig - .setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES); - managedLedgerConfig.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB()); - managedLedgerConfig.setAutoSkipNonRecoverableData(serviceConfig.isAutoSkipNonRecoverableData()); - managedLedgerConfig.setLazyCursorRecovery(serviceConfig.isLazyCursorRecovery()); - managedLedgerConfig.setInactiveLedgerRollOverTime( - serviceConfig.getManagedLedgerInactiveLedgerRolloverTimeSeconds(), TimeUnit.SECONDS); - managedLedgerConfig.setCacheEvictionByMarkDeletedPosition( - serviceConfig.isCacheEvictionByMarkDeletedPosition()); - managedLedgerConfig.setMinimumBacklogCursorsForCaching( - serviceConfig.getManagedLedgerMinimumBacklogCursorsForCaching()); - managedLedgerConfig.setMinimumBacklogEntriesForCaching( - serviceConfig.getManagedLedgerMinimumBacklogEntriesForCaching()); - managedLedgerConfig.setMaxBacklogBetweenCursorsForCaching( - serviceConfig.getManagedLedgerMaxBacklogBetweenCursorsForCaching()); - - OffloadPoliciesImpl nsLevelOffloadPolicies = - (OffloadPoliciesImpl) policies.map(p -> p.offload_policies).orElse(null); - OffloadPoliciesImpl offloadPolicies = OffloadPoliciesImpl.mergeConfiguration( - topicLevelOffloadPolicies, - 
OffloadPoliciesImpl.oldPoliciesCompatible(nsLevelOffloadPolicies, policies.orElse(null)), - getPulsar().getConfig().getProperties()); - if (NamespaceService.isSystemServiceNamespace(namespace.toString())) { - managedLedgerConfig.setLedgerOffloader(NullLedgerOffloader.INSTANCE); - } else { - if (topicLevelOffloadPolicies != null) { - try { - LedgerOffloader topicLevelLedgerOffLoader = - pulsar().createManagedLedgerOffloader(offloadPolicies); - managedLedgerConfig.setLedgerOffloader(topicLevelLedgerOffLoader); - } catch (PulsarServerException e) { - throw new RuntimeException(e); - } - } else { - //If the topic level policy is null, use the namespace level - managedLedgerConfig - .setLedgerOffloader(pulsar.getManagedLedgerOffloader(namespace, offloadPolicies)); - } + managedLedgerConfig.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate()); + managedLedgerConfig.setDigestType(serviceConfig.getManagedLedgerDigestType()); + managedLedgerConfig.setPassword(serviceConfig.getManagedLedgerPassword()); + + managedLedgerConfig + .setMaxUnackedRangesToPersist(serviceConfig.getManagedLedgerMaxUnackedRangesToPersist()); + managedLedgerConfig.setPersistentUnackedRangesWithMultipleEntriesEnabled( + serviceConfig.isPersistentUnackedRangesWithMultipleEntriesEnabled()); + managedLedgerConfig.setMaxUnackedRangesToPersistInMetadataStore( + serviceConfig.getManagedLedgerMaxUnackedRangesToPersistInMetadataStore()); + managedLedgerConfig.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger()); + managedLedgerConfig + .setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(), + TimeUnit.MINUTES); + managedLedgerConfig + .setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(), + TimeUnit.MINUTES); + managedLedgerConfig.setMaxSizePerLedgerMb(serviceConfig.getManagedLedgerMaxSizePerLedgerMbytes()); + + managedLedgerConfig.setMetadataOperationsTimeoutSeconds( + 
serviceConfig.getManagedLedgerMetadataOperationsTimeoutSeconds()); + managedLedgerConfig + .setReadEntryTimeoutSeconds(serviceConfig.getManagedLedgerReadEntryTimeoutSeconds()); + managedLedgerConfig + .setAddEntryTimeoutSeconds(serviceConfig.getManagedLedgerAddEntryTimeoutSeconds()); + managedLedgerConfig.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize()); + managedLedgerConfig.setUnackedRangesOpenCacheSetEnabled( + serviceConfig.isManagedLedgerUnackedRangesOpenCacheSetEnabled()); + managedLedgerConfig.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum()); + managedLedgerConfig.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum()); + managedLedgerConfig + .setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger()); + + managedLedgerConfig + .setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds()); + managedLedgerConfig + .setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES); + managedLedgerConfig.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB()); + managedLedgerConfig.setAutoSkipNonRecoverableData(serviceConfig.isAutoSkipNonRecoverableData()); + managedLedgerConfig.setLazyCursorRecovery(serviceConfig.isLazyCursorRecovery()); + managedLedgerConfig.setInactiveLedgerRollOverTime( + serviceConfig.getManagedLedgerInactiveLedgerRolloverTimeSeconds(), TimeUnit.SECONDS); + managedLedgerConfig.setCacheEvictionByMarkDeletedPosition( + serviceConfig.isCacheEvictionByMarkDeletedPosition()); + managedLedgerConfig.setMinimumBacklogCursorsForCaching( + serviceConfig.getManagedLedgerMinimumBacklogCursorsForCaching()); + managedLedgerConfig.setMinimumBacklogEntriesForCaching( + serviceConfig.getManagedLedgerMinimumBacklogEntriesForCaching()); + managedLedgerConfig.setMaxBacklogBetweenCursorsForCaching( + serviceConfig.getManagedLedgerMaxBacklogBetweenCursorsForCaching()); + + OffloadPoliciesImpl 
nsLevelOffloadPolicies = + (OffloadPoliciesImpl) policies.map(p -> p.offload_policies).orElse(null); + OffloadPoliciesImpl offloadPolicies = OffloadPoliciesImpl.mergeConfiguration( + topicLevelOffloadPolicies, + OffloadPoliciesImpl.oldPoliciesCompatible(nsLevelOffloadPolicies, policies.orElse(null)), + getPulsar().getConfig().getProperties()); + if (NamespaceService.isSystemServiceNamespace(namespace.toString())) { + managedLedgerConfig.setLedgerOffloader(NullLedgerOffloader.INSTANCE); + } else { + if (topicLevelOffloadPolicies != null) { + try { + LedgerOffloader topicLevelLedgerOffLoader = + pulsar().createManagedLedgerOffloader(offloadPolicies); + managedLedgerConfig.setLedgerOffloader(topicLevelLedgerOffLoader); + } catch (PulsarServerException e) { + throw new RuntimeException(e); } + } else { + //If the topic level policy is null, use the namespace level + managedLedgerConfig + .setLedgerOffloader(pulsar.getManagedLedgerOffloader(namespace, offloadPolicies)); + } + } - managedLedgerConfig.setDeletionAtBatchIndexLevelEnabled( - serviceConfig.isAcknowledgmentAtBatchIndexLevelEnabled()); - managedLedgerConfig.setNewEntriesCheckDelayInMillis( - serviceConfig.getManagedLedgerNewEntriesCheckDelayInMillis()); - return managedLedgerConfig; - }); + managedLedgerConfig.setDeletionAtBatchIndexLevelEnabled( + serviceConfig.isAcknowledgmentAtBatchIndexLevelEnabled()); + managedLedgerConfig.setNewEntriesCheckDelayInMillis( + serviceConfig.getManagedLedgerNewEntriesCheckDelayInMillis()); + return managedLedgerConfig; + }); } private void addTopicToStatsMaps(TopicName topicName, Topic topic) { @@ -2119,13 +2189,16 @@ public void monitorBacklogQuota() { }); } - public boolean isTopicNsOwnedByBroker(TopicName topicName) { - try { - return pulsar.getNamespaceService().isServiceUnitOwned(topicName); - } catch (Exception e) { - log.warn("Failed to check the ownership of the topic: {}, {}", topicName, e.getMessage()); - } - return false; + public CompletableFuture 
isTopicNsOwnedByBrokerAsync(TopicName topicName) { + return pulsar.getNamespaceService().isServiceUnitOwnedAsync(topicName) + .handle((hasOwnership, t) -> { + if (t == null) { + return hasOwnership; + } else { + log.warn("Failed to check the ownership of the topic: {}, {}", topicName, t.getMessage()); + return false; + } + }); } public CompletableFuture checkTopicNsOwnership(final String topic) { @@ -2138,7 +2211,7 @@ public CompletableFuture checkTopicNsOwnership(final String topic) { } else { String msg = String.format("Namespace bundle for topic (%s) not served by this instance:%s. " + "Please redo the lookup. Request is denied: namespace=%s", - topic, pulsar.getLookupServiceAddress(), topicName.getNamespace()); + topic, pulsar.getBrokerId(), topicName.getNamespace()); log.warn(msg); return FutureUtil.failedFuture(new ServiceUnitNotReadyException(msg)); } @@ -2176,6 +2249,21 @@ private CompletableFuture unloadServiceUnit(NamespaceBundle serviceUnit if (serviceUnit.includes(topicName)) { // Topic needs to be unloaded log.info("[{}] Unloading topic", topicName); + if (topicFuture.isCompletedExceptionally()) { + try { + topicFuture.get(); + } catch (InterruptedException | ExecutionException ex) { + if (ex.getCause() instanceof ServiceUnitNotReadyException) { + // Topic was already unloaded + if (log.isDebugEnabled()) { + log.debug("[{}] Topic was already unloaded", topicName); + } + return; + } else { + log.warn("[{}] Got exception when closing topic", topicName, ex); + } + } + } closeFutures.add(topicFuture .thenCompose(t -> t.isPresent() ? 
t.get().close(closeWithoutWaitingClientDisconnect) : CompletableFuture.completedFuture(null))); @@ -2711,7 +2799,7 @@ private void updateMaxPublishRatePerTopicInMessages() { forEachTopic(topic -> { if (topic instanceof AbstractTopic) { ((AbstractTopic) topic).updateBrokerPublishRate(); - ((AbstractTopic) topic).updatePublishDispatcher(); + ((AbstractTopic) topic).updatePublishRateLimiter(); } })); } @@ -3001,7 +3089,7 @@ private void createPendingLoadTopic() { checkOwnershipAndCreatePersistentTopic(topic, pendingTopic.isCreateIfMissing(), pendingFuture, - pendingTopic.getProperties()); + pendingTopic.getProperties(), pendingTopic.getTopicPolicies()); pendingFuture.handle((persistentTopic, ex) -> { // release permit and process next pending topic if (acquiredPermit) { @@ -3282,10 +3370,10 @@ private CompletableFuture isAllowAutoTopicCreationAsync(final TopicName return CompletableFuture.completedFuture(false); } - // ServiceUnitStateChannelImpl.TOPIC expects to be a non-partitioned-topic now. + // ExtensibleLoadManagerImpl.internal topics expects to be non-partitioned-topics now. // We don't allow the auto-creation here. - // ServiceUnitStateChannelImpl.start() is responsible to create the topic. - if (ServiceUnitStateChannelImpl.TOPIC.equals(topicName.toString())) { + // ExtensibleLoadManagerImpl.start() is responsible to create the internal system topics. + if (ExtensibleLoadManagerImpl.isInternalTopic(topicName.toString())) { return CompletableFuture.completedFuture(false); } @@ -3440,6 +3528,24 @@ public CompletableFuture deleteTopicPolicies(TopicName topicName) { .deleteTopicPoliciesAsync(TopicName.get(topicName.getPartitionedTopicName())); } + public CompletableFuture deleteSchema(TopicName topicName) { + // delete schema at the upper level when deleting the partitioned topic. 
+ if (topicName.isPartitioned()) { + return CompletableFuture.completedFuture(null); + } + String base = topicName.getPartitionedTopicName(); + String id = TopicName.get(base).getSchemaName(); + return getPulsar().getSchemaRegistryService().deleteSchemaStorage(id).whenComplete((vid, ex) -> { + if (vid != null && ex == null) { + // It's different from `SchemasResource.deleteSchema` + // because when we delete a topic, the schema + // history is meaningless. But when we delete a schema of a topic, a new schema could be + // registered in the future. + log.info("Deleted schema storage of id: {}", id); + } + }); + } + private CompletableFuture checkMaxTopicsPerNamespace(TopicName topicName, int numPartitions) { return pulsar.getPulsarResources().getNamespaceResources() .getPoliciesAsync(topicName.getNamespaceObject()) @@ -3543,5 +3649,6 @@ private static class TopicLoadingContext { private final boolean createIfMissing; private final CompletableFuture> topicFuture; private final Map properties; + private final TopicPolicies topicPolicies; } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java index 3e77588b2459f..831d6068e2097 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerServiceException.java @@ -258,6 +258,8 @@ private static ServerError getClientErrorCode(Throwable t, boolean checkCauseIfU return ServerError.ServiceNotReady; } else if (t instanceof TopicNotFoundException) { return ServerError.TopicNotFound; + } else if (t instanceof SubscriptionNotFoundException) { + return ServerError.SubscriptionNotFound; } else if (t instanceof IncompatibleSchemaException || t instanceof InvalidSchemaDataException) { // for backward compatible with old clients, invalid schema data diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java index ea491bd40d332..b2b2b512c8cfc 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java @@ -39,7 +39,8 @@ * number of keys assigned to each consumer. */ public class ConsistentHashingStickyKeyConsumerSelector implements StickyKeyConsumerSelector { - + // use NUL character as field separator for hash key calculation + private static final String KEY_SEPARATOR = "\0"; private final ReadWriteLock rwLock = new ReentrantReadWriteLock(); // Consistent-Hash ring @@ -59,8 +60,7 @@ public CompletableFuture addConsumer(Consumer consumer) { // Insert multiple points on the hash ring for every consumer // The points are deterministically added based on the hash of the consumer name for (int i = 0; i < numberOfPoints; i++) { - String key = consumer.consumerName() + i; - int hash = Murmur3_32Hash.getInstance().makeHash(key.getBytes()); + int hash = calculateHashForConsumerAndIndex(consumer, i); hashRing.compute(hash, (k, v) -> { if (v == null) { return Lists.newArrayList(consumer); @@ -79,14 +79,18 @@ public CompletableFuture addConsumer(Consumer consumer) { } } + private static int calculateHashForConsumerAndIndex(Consumer consumer, int index) { + String key = consumer.consumerName() + KEY_SEPARATOR + index; + return Murmur3_32Hash.getInstance().makeHash(key.getBytes()); + } + @Override public void removeConsumer(Consumer consumer) { rwLock.writeLock().lock(); try { // Remove all the points that were added for this consumer for (int i = 0; i < numberOfPoints; i++) { - String key = consumer.consumerName() + i; - int hash = Murmur3_32Hash.getInstance().makeHash(key.getBytes()); + 
int hash = calculateHashForConsumerAndIndex(consumer, i); hashRing.compute(hash, (k, v) -> { if (v == null) { return null; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java index 176f033a6dcf1..312f16c4004d5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java @@ -115,6 +115,9 @@ public class Consumer { private final ConsumerStatsImpl stats; private final boolean isDurable; + + private final boolean isPersistentTopic; + private static final AtomicIntegerFieldUpdater UNACKED_MESSAGES_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Consumer.class, "unackedMessages"); private volatile int unackedMessages = 0; @@ -172,6 +175,7 @@ public Consumer(Subscription subscription, SubType subType, String topicName, lo this.readCompacted = readCompacted; this.consumerName = consumerName; this.isDurable = isDurable; + this.isPersistentTopic = subscription.getTopic() instanceof PersistentTopic; this.keySharedMeta = keySharedMeta; this.cnx = cnx; this.msgOut = new Rate(); @@ -239,6 +243,7 @@ public Consumer(Subscription subscription, SubType subType, String topicName, lo this.pendingAcks = null; this.stats = null; this.isDurable = false; + this.isPersistentTopic = false; this.metadata = null; this.keySharedMeta = null; this.clientAddress = null; @@ -318,7 +323,7 @@ public Future sendMessages(final List entries, EntryBatch if (pendingAcks != null) { int batchSize = batchSizes.getBatchSize(i); int stickyKeyHash = getStickyKeyHash(entry); - long[] ackSet = getCursorAckSet(PositionImpl.get(entry.getLedgerId(), entry.getEntryId())); + long[] ackSet = batchIndexesAcks == null ? 
null : batchIndexesAcks.getAckSet(i); if (ackSet != null) { unackedMessages -= (batchSize - BitSet.valueOf(ackSet).cardinality()); } @@ -507,17 +512,17 @@ private CompletableFuture individualAckNormal(CommandAck ack, Map topics) { BaseCommand command = Commands.newWatchTopicListSuccess(requestId, watcherId, topicsHash, topics); interceptAndWriteCommand(command); } + /*** + * {@inheritDoc} + */ @Override public void sendWatchTopicListUpdate(long watcherId, List newTopics, List deletedTopics, String topicsHash) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java index f3b355d63325f..5057b7b045a92 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java @@ -990,7 +990,6 @@ protected void handleConnect(CommandConnect connect) { try { byte[] authData = connect.hasAuthData() ? connect.getAuthData() : emptyArray; AuthData clientData = AuthData.of(authData); - // init authentication if (connect.hasAuthMethodName()) { authMethod = connect.getAuthMethodName(); @@ -1049,10 +1048,22 @@ protected void handleConnect(CommandConnect connect) { .getAuthenticationService() .getAuthenticationProvider(originalAuthMethod); + /** + * When both the broker and the proxy are configured with anonymousUserRole + * if the client does not configure an authentication method + * the proxy side will set the value of anonymousUserRole to clientAuthRole when it creates a connection + * and the value of clientAuthMethod will be none. + * Similarly, should also set the value of authRole to anonymousUserRole on the broker side. 
+ */ if (originalAuthenticationProvider == null) { - throw new AuthenticationException( - String.format("Can't find AuthenticationProvider for original role" - + " using auth method [%s] is not available", originalAuthMethod)); + authRole = getBrokerService().getAuthenticationService().getAnonymousUserRole() + .orElseThrow(() -> + new AuthenticationException("No anonymous role, and can't find " + + "AuthenticationProvider for original role using auth method " + + "[" + originalAuthMethod + "] is not available")); + originalPrincipal = authRole; + completeConnect(clientProtocolVersion, clientVersion); + return; } originalAuthDataCopy = AuthData.of(connect.getOriginalAuthData().getBytes()); @@ -1360,7 +1371,7 @@ protected void handleProducer(final CommandProducer cmdProducer) { cmdProducer.hasInitialSubscriptionName() ? cmdProducer.getInitialSubscriptionName() : null; final boolean supportsPartialProducer = supportsPartialProducer(); - TopicName topicName = validateTopicName(cmdProducer.getTopic(), requestId, cmdProducer); + final TopicName topicName = validateTopicName(cmdProducer.getTopic(), requestId, cmdProducer); if (topicName == null) { return; } @@ -1391,36 +1402,36 @@ protected void handleProducer(final CommandProducer cmdProducer) { CompletableFuture existingProducerFuture = producers.putIfAbsent(producerId, producerFuture); if (existingProducerFuture != null) { - if (existingProducerFuture.isDone() && !existingProducerFuture.isCompletedExceptionally()) { - Producer producer = existingProducerFuture.getNow(null); - log.info("[{}] Producer with the same id is already created:" - + " producerId={}, producer={}", remoteAddress, producerId, producer); - commandSender.sendProducerSuccessResponse(requestId, producer.getProducerName(), - producer.getSchemaVersion()); - return null; - } else { + if (!existingProducerFuture.isDone()) { // There was an early request to create a producer with same producerId. 
// This can happen when client timeout is lower than the broker timeouts. // We need to wait until the previous producer creation request // either complete or fails. - ServerError error = null; - if (!existingProducerFuture.isDone()) { - error = ServerError.ServiceNotReady; - } else { - error = getErrorCode(existingProducerFuture); - // remove producer with producerId as it's already completed with exception - producers.remove(producerId, existingProducerFuture); - } log.warn("[{}][{}] Producer with id is already present on the connection, producerId={}", remoteAddress, topicName, producerId); - commandSender.sendErrorResponse(requestId, error, "Producer is already present on the connection"); - return null; + commandSender.sendErrorResponse(requestId, ServerError.ServiceNotReady, + "Producer is already present on the connection"); + } else if (existingProducerFuture.isCompletedExceptionally()) { + // remove producer with producerId as it's already completed with exception + log.warn("[{}][{}] Producer with id is failed to register present on the connection, producerId={}", + remoteAddress, topicName, producerId); + ServerError error = getErrorCode(existingProducerFuture); + producers.remove(producerId, existingProducerFuture); + commandSender.sendErrorResponse(requestId, error, + "Producer is already failed to register present on the connection"); + } else { + Producer producer = existingProducerFuture.getNow(null); + log.info("[{}] [{}] Producer with the same id is already created:" + + " producerId={}, producer={}", remoteAddress, topicName, producerId, producer); + commandSender.sendProducerSuccessResponse(requestId, producer.getProducerName(), + producer.getSchemaVersion()); } + return null; } if (log.isDebugEnabled()) { - log.debug("[{}][{}] Creating producer. producerId={}, schema is {}", remoteAddress, topicName, - producerId, schema == null ? "absent" : "present"); + log.debug("[{}][{}] Creating producer. 
producerId={}, producerName={}, schema is {}", remoteAddress, + topicName, producerId, producerName, schema == null ? "absent" : "present"); } service.getOrCreateTopic(topicName.toString()).thenCompose((Topic topic) -> { @@ -1553,7 +1564,7 @@ protected void handleProducer(final CommandProducer cmdProducer) { // Do not print stack traces for expected exceptions if (cause instanceof NoSuchElementException) { - cause = new TopicNotFoundException("Topic Not Found."); + cause = new TopicNotFoundException(String.format("Topic not found %s", topicName.toString())); log.warn("[{}] Failed to load topic {}, producerId={}: Topic not found", remoteAddress, topicName, producerId); } else if (!Exceptions.areExceptionsPresentInChain(cause, @@ -2052,23 +2063,29 @@ protected void handleGetLastMessageId(CommandGetLastMessageId getLastMessageId) long requestId = getLastMessageId.getRequestId(); Topic topic = consumer.getSubscription().getTopic(); - Position lastPosition = topic.getLastPosition(); - int partitionIndex = TopicName.getPartitionIndex(topic.getName()); - - Position markDeletePosition = null; - if (consumer.getSubscription() instanceof PersistentSubscription) { - markDeletePosition = ((PersistentSubscription) consumer.getSubscription()).getCursor() - .getMarkDeletedPosition(); - } - - getLargestBatchIndexWhenPossible( - topic, - (PositionImpl) lastPosition, - (PositionImpl) markDeletePosition, - partitionIndex, - requestId, - consumer.getSubscription().getName()); + topic.checkIfTransactionBufferRecoverCompletely(true).thenRun(() -> { + Position lastPosition = ((PersistentTopic) topic).getMaxReadPosition(); + int partitionIndex = TopicName.getPartitionIndex(topic.getName()); + + Position markDeletePosition = null; + if (consumer.getSubscription() instanceof PersistentSubscription) { + markDeletePosition = ((PersistentSubscription) consumer.getSubscription()).getCursor() + .getMarkDeletedPosition(); + } + getLargestBatchIndexWhenPossible( + topic, + (PositionImpl) 
lastPosition, + (PositionImpl) markDeletePosition, + partitionIndex, + requestId, + consumer.getSubscription().getName(), + consumer.readCompacted()); + }).exceptionally(e -> { + writeAndFlush(Commands.newError(getLastMessageId.getRequestId(), + ServerError.UnknownError, "Failed to recover Transaction Buffer.")); + return null; + }); } else { writeAndFlush(Commands.newError(getLastMessageId.getRequestId(), ServerError.MetadataError, "Consumer not found")); @@ -2081,15 +2098,17 @@ private void getLargestBatchIndexWhenPossible( PositionImpl markDeletePosition, int partitionIndex, long requestId, - String subscriptionName) { + String subscriptionName, + boolean readCompacted) { PersistentTopic persistentTopic = (PersistentTopic) topic; ManagedLedgerImpl ml = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); // If it's not pointing to a valid entry, respond messageId of the current position. // If the compaction cursor reach the end of the topic, respond messageId from compacted ledger - CompletableFuture compactionHorizonFuture = - persistentTopic.getTopicCompactionService().getLastCompactedPosition(); + CompletableFuture compactionHorizonFuture = readCompacted + ? 
persistentTopic.getTopicCompactionService().getLastCompactedPosition() : + CompletableFuture.completedFuture(null); compactionHorizonFuture.whenComplete((compactionHorizon, ex) -> { if (ex != null) { @@ -2098,8 +2117,22 @@ private void getLargestBatchIndexWhenPossible( return; } - if (lastPosition.getEntryId() == -1 || (compactionHorizon != null - && lastPosition.compareTo((PositionImpl) compactionHorizon) <= 0)) { + if (lastPosition.getEntryId() == -1 || !ml.ledgerExists(lastPosition.getLedgerId())) { + // there is no entry in the original topic + if (compactionHorizon != null) { + // if readCompacted is true, we need to read the last entry from compacted topic + handleLastMessageIdFromCompactionService(persistentTopic, requestId, partitionIndex, + markDeletePosition); + } else { + // if readCompacted is false, we need to return MessageId.earliest + writeAndFlush(Commands.newGetLastMessageIdResponse(requestId, -1, -1, partitionIndex, -1, + markDeletePosition != null ? markDeletePosition.getLedgerId() : -1, + markDeletePosition != null ? 
markDeletePosition.getEntryId() : -1)); + } + return; + } + + if (compactionHorizon != null && lastPosition.compareTo((PositionImpl) compactionHorizon) <= 0) { handleLastMessageIdFromCompactionService(persistentTopic, requestId, partitionIndex, markDeletePosition); return; @@ -2128,7 +2161,8 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { batchSizeFuture.whenComplete((batchSize, e) -> { if (e != null) { - if (e.getCause() instanceof ManagedLedgerException.NonRecoverableLedgerException) { + if (e.getCause() instanceof ManagedLedgerException.NonRecoverableLedgerException + && readCompacted) { handleLastMessageIdFromCompactionService(persistentTopic, requestId, partitionIndex, markDeletePosition); } else { @@ -2366,7 +2400,7 @@ remoteAddress, new String(commandGetSchema.getSchemaVersion()), schemaService.getSchema(schemaName, schemaVersion).thenAccept(schemaAndMetadata -> { if (schemaAndMetadata == null) { commandSender.sendGetSchemaErrorResponse(requestId, ServerError.TopicNotFound, - "Topic not found or no-schema"); + String.format("Topic not found or no-schema %s", commandGetSchema.getTopic())); } else { commandSender.sendGetSchemaResponse(requestId, SchemaInfoUtil.newSchemaInfo(schemaName, schemaAndMetadata.schema), schemaAndMetadata.version); @@ -2384,7 +2418,7 @@ protected void handleGetOrCreateSchema(CommandGetOrCreateSchema commandGetOrCrea log.debug("Received CommandGetOrCreateSchema call from {}", remoteAddress); } long requestId = commandGetOrCreateSchema.getRequestId(); - String topicName = commandGetOrCreateSchema.getTopic(); + final String topicName = commandGetOrCreateSchema.getTopic(); SchemaData schemaData = getSchema(commandGetOrCreateSchema.getSchema()); SchemaData schema = schemaData.getType() == SchemaType.NONE ? 
null : schemaData; service.getTopicIfExists(topicName).thenAccept(topicOpt -> { @@ -2404,7 +2438,7 @@ protected void handleGetOrCreateSchema(CommandGetOrCreateSchema commandGetOrCrea }); } else { commandSender.sendGetOrCreateSchemaErrorResponse(requestId, ServerError.TopicNotFound, - "Topic not found"); + String.format("Topic not found %s", topicName)); } }).exceptionally(ex -> { ServerError errorCode = BrokerServiceException.getClientErrorCode(ex); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java index 09f8de818db0a..71f78e21f938f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java @@ -18,12 +18,16 @@ */ package org.apache.pulsar.broker.service; +import static java.util.Objects.requireNonNull; +import com.github.benmanes.caffeine.cache.AsyncLoadingCache; +import com.github.benmanes.caffeine.cache.Caffeine; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -41,10 +45,8 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.client.impl.TopicMessageImpl; -import org.apache.pulsar.client.util.RetryUtil; import org.apache.pulsar.common.events.ActionType; import org.apache.pulsar.common.events.EventType; 
import org.apache.pulsar.common.events.PulsarEvent; @@ -54,6 +56,7 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.util.FutureUtil; +import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,34 +81,52 @@ public class SystemTopicBasedTopicPoliciesService implements TopicPoliciesServic private final Map>> readerCaches = new ConcurrentHashMap<>(); - @VisibleForTesting - final Map policyCacheInitMap = new ConcurrentHashMap<>(); + + final Map> policyCacheInitMap = new ConcurrentHashMap<>(); @VisibleForTesting final Map>> listeners = new ConcurrentHashMap<>(); + private final AsyncLoadingCache> writerCaches; + public SystemTopicBasedTopicPoliciesService(PulsarService pulsarService) { this.pulsarService = pulsarService; this.clusterName = pulsarService.getConfiguration().getClusterName(); this.localCluster = Sets.newHashSet(clusterName); + this.writerCaches = Caffeine.newBuilder() + .expireAfterAccess(5, TimeUnit.MINUTES) + .removalListener((namespaceName, writer, cause) -> { + ((SystemTopicClient.Writer) writer).closeAsync().exceptionally(ex -> { + log.error("[{}] Close writer error.", namespaceName, ex); + return null; + }); + }) + .buildAsync((namespaceName, executor) -> { + SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory + .createTopicPoliciesSystemTopicClient(namespaceName); + return systemTopicClient.newWriterAsync(); + }); } @Override public CompletableFuture deleteTopicPoliciesAsync(TopicName topicName) { + if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { + return CompletableFuture.completedFuture(null); + } return sendTopicPolicyEvent(topicName, ActionType.DELETE, null); } @Override public CompletableFuture updateTopicPoliciesAsync(TopicName topicName, TopicPolicies policies) { + if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { + return 
CompletableFuture.failedFuture(new BrokerServiceException.NotAllowedException( + "Not allowed to update topic policy for the heartbeat topic")); + } return sendTopicPolicyEvent(topicName, ActionType.UPDATE, policies); } private CompletableFuture sendTopicPolicyEvent(TopicName topicName, ActionType actionType, TopicPolicies policies) { - if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { - return CompletableFuture.failedFuture( - new BrokerServiceException.NotAllowedException("Not allowed to send event to health check topic")); - } return pulsarService.getPulsarResources().getNamespaceResources() .getPoliciesAsync(topicName.getNamespaceObject()) .thenCompose(namespacePolicies -> { @@ -119,39 +140,32 @@ private CompletableFuture sendTopicPolicyEvent(TopicName topicName, Action } catch (PulsarServerException e) { return CompletableFuture.failedFuture(e); } - - SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory - .createTopicPoliciesSystemTopicClient(topicName.getNamespaceObject()); - - return systemTopicClient.newWriterAsync() - .thenCompose(writer -> { - PulsarEvent event = getPulsarEvent(topicName, actionType, policies); - CompletableFuture writeFuture = - ActionType.DELETE.equals(actionType) ? 
writer.deleteAsync(getEventKey(event), event) - : writer.writeAsync(getEventKey(event), event); - return writeFuture.handle((messageId, e) -> { - if (e != null) { - return CompletableFuture.failedFuture(e); + CompletableFuture result = new CompletableFuture<>(); + writerCaches.get(topicName.getNamespaceObject()) + .whenComplete((writer, cause) -> { + if (cause != null) { + writerCaches.synchronous().invalidate(topicName.getNamespaceObject()); + result.completeExceptionally(cause); } else { - if (messageId != null) { - return CompletableFuture.completedFuture(null); - } else { - return CompletableFuture.failedFuture( - new RuntimeException("Got message id is null.")); - } - } - }).thenRun(() -> - writer.closeAsync().whenComplete((v, cause) -> { - if (cause != null) { - log.error("[{}] Close writer error.", topicName, cause); + PulsarEvent event = getPulsarEvent(topicName, actionType, policies); + CompletableFuture writeFuture = ActionType.DELETE.equals(actionType) + ? writer.deleteAsync(getEventKey(event), event) + : writer.writeAsync(getEventKey(event), event); + writeFuture.whenComplete((messageId, e) -> { + if (e != null) { + result.completeExceptionally(e); + } else { + if (messageId != null) { + result.complete(null); } else { - if (log.isDebugEnabled()) { - log.debug("[{}] Close writer success.", topicName); - } + result.completeExceptionally( + new RuntimeException("Got message id is null.")); } - }) - ); + } + }); + } }); + return result; }); } @@ -217,14 +231,17 @@ public TopicPolicies getTopicPolicies(TopicName topicName) throws TopicPoliciesC @Override public TopicPolicies getTopicPolicies(TopicName topicName, boolean isGlobal) throws TopicPoliciesCacheNotInitException { + if (NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { + return null; + } if (!policyCacheInitMap.containsKey(topicName.getNamespaceObject())) { NamespaceName namespace = topicName.getNamespaceObject(); - prepareInitPoliciesCache(namespace, new 
CompletableFuture<>()); + prepareInitPoliciesCacheAsync(namespace); } MutablePair result = new MutablePair<>(); policyCacheInitMap.compute(topicName.getNamespaceObject(), (k, initialized) -> { - if (initialized == null || !initialized) { + if (initialized == null || !initialized.isDone()) { result.setLeft(new TopicPoliciesCacheNotInitException()); } else { TopicPolicies topicPolicies = @@ -242,6 +259,34 @@ public TopicPolicies getTopicPolicies(TopicName topicName, } } + @NotNull + @Override + public CompletableFuture> getTopicPoliciesAsync(@NotNull TopicName topicName, + boolean isGlobal) { + requireNonNull(topicName); + final CompletableFuture preparedFuture = prepareInitPoliciesCacheAsync(topicName.getNamespaceObject()); + return preparedFuture.thenApply(__ -> { + final TopicPolicies candidatePolicies = isGlobal + ? globalPoliciesCache.get(TopicName.get(topicName.getPartitionedTopicName())) + : policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); + return Optional.ofNullable(candidatePolicies); + }); + } + + @NotNull + @Override + public CompletableFuture> getTopicPoliciesAsync(@NotNull TopicName topicName) { + requireNonNull(topicName); + final CompletableFuture preparedFuture = prepareInitPoliciesCacheAsync(topicName.getNamespaceObject()); + return preparedFuture.thenApply(__ -> { + final TopicPolicies localPolicies = policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); + if (localPolicies != null) { + return Optional.of(localPolicies); + } + return Optional.ofNullable(globalPoliciesCache.get(TopicName.get(topicName.getPartitionedTopicName()))); + }); + } + @Override public TopicPolicies getTopicPoliciesIfExists(TopicName topicName) { return policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); @@ -265,55 +310,60 @@ public CompletableFuture getTopicPoliciesBypassCacheAsync(TopicNa @Override public CompletableFuture addOwnedNamespaceBundleAsync(NamespaceBundle namespaceBundle) { - CompletableFuture result = 
new CompletableFuture<>(); NamespaceName namespace = namespaceBundle.getNamespaceObject(); if (NamespaceService.isHeartbeatNamespace(namespace)) { - result.complete(null); - return result; + return CompletableFuture.completedFuture(null); } synchronized (this) { if (readerCaches.get(namespace) != null) { ownedBundlesCountPerNamespace.get(namespace).incrementAndGet(); - result.complete(null); + return CompletableFuture.completedFuture(null); } else { - prepareInitPoliciesCache(namespace, result); + return prepareInitPoliciesCacheAsync(namespace); } } - return result; } - private void prepareInitPoliciesCache(@Nonnull NamespaceName namespace, CompletableFuture result) { - if (policyCacheInitMap.putIfAbsent(namespace, false) == null) { - CompletableFuture> readerCompletableFuture = - createSystemTopicClientWithRetry(namespace); + private @Nonnull CompletableFuture prepareInitPoliciesCacheAsync(@Nonnull NamespaceName namespace) { + requireNonNull(namespace); + return policyCacheInitMap.computeIfAbsent(namespace, (k) -> { + final CompletableFuture> readerCompletableFuture = + createSystemTopicClient(namespace); readerCaches.put(namespace, readerCompletableFuture); ownedBundlesCountPerNamespace.putIfAbsent(namespace, new AtomicInteger(1)); - readerCompletableFuture.thenAccept(reader -> { - initPolicesCache(reader, result); - result.thenRun(() -> readMorePolicies(reader)); - }).exceptionally(ex -> { - log.error("[{}] Failed to create reader on __change_events topic", namespace, ex); - cleanCacheAndCloseReader(namespace, false); - result.completeExceptionally(ex); + final CompletableFuture initFuture = readerCompletableFuture + .thenCompose(reader -> { + final CompletableFuture stageFuture = new CompletableFuture<>(); + initPolicesCache(reader, stageFuture); + return stageFuture + // Read policies in background + .thenAccept(__ -> readMorePoliciesAsync(reader)); + }); + initFuture.exceptionally(ex -> { + try { + log.error("[{}] Failed to create reader on __change_events 
topic", namespace, ex); + cleanCacheAndCloseReader(namespace, false); + } catch (Throwable cleanupEx) { + // Adding this catch to avoid break callback chain + log.error("[{}] Failed to cleanup reader on __change_events topic", namespace, cleanupEx); + } return null; }); - } + // let caller know we've got an exception. + return initFuture; + }); } - protected CompletableFuture> createSystemTopicClientWithRetry( + protected CompletableFuture> createSystemTopicClient( NamespaceName namespace) { - CompletableFuture> result = new CompletableFuture<>(); try { createSystemTopicFactoryIfNeeded(); - } catch (PulsarServerException e) { - result.completeExceptionally(e); - return result; + } catch (PulsarServerException ex) { + return FutureUtil.failedFuture(ex); } - SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory + final SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory .createTopicPoliciesSystemTopicClient(namespace); - Backoff backoff = new Backoff(1, TimeUnit.SECONDS, 3, TimeUnit.SECONDS, 10, TimeUnit.SECONDS); - RetryUtil.retryAsynchronously(systemTopicClient::newReaderAsync, backoff, pulsarService.getExecutor(), result); - return result; + return systemTopicClient.newReaderAsync(); } @Override @@ -325,7 +375,7 @@ public CompletableFuture removeOwnedNamespaceBundleAsync(NamespaceBundle n } AtomicInteger bundlesCount = ownedBundlesCountPerNamespace.get(namespace); if (bundlesCount == null || bundlesCount.decrementAndGet() <= 0) { - cleanCacheAndCloseReader(namespace, true); + cleanCacheAndCloseReader(namespace, true, true); } return CompletableFuture.completedFuture(null); } @@ -381,8 +431,7 @@ private void initPolicesCache(SystemTopicClient.Reader reader, Comp if (log.isDebugEnabled()) { log.debug("[{}] Reach the end of the system topic.", reader.getSystemTopic().getTopicName()); } - policyCacheInitMap.computeIfPresent( - reader.getSystemTopic().getTopicName().getNamespaceObject(), (k, v) -> true); + // replay policy message 
policiesCache.forEach(((topicName, topicPolicies) -> { if (listeners.get(topicName) != null) { @@ -395,12 +444,21 @@ private void initPolicesCache(SystemTopicClient.Reader reader, Comp } } })); + future.complete(null); } }); } private void cleanCacheAndCloseReader(@Nonnull NamespaceName namespace, boolean cleanOwnedBundlesCount) { + cleanCacheAndCloseReader(namespace, cleanOwnedBundlesCount, false); + } + + private void cleanCacheAndCloseReader(@Nonnull NamespaceName namespace, boolean cleanOwnedBundlesCount, + boolean cleanWriterCache) { + if (cleanWriterCache) { + writerCaches.synchronous().invalidate(namespace); + } CompletableFuture> readerFuture = readerCaches.remove(namespace); if (cleanOwnedBundlesCount) { @@ -420,7 +478,13 @@ private void cleanCacheAndCloseReader(@Nonnull NamespaceName namespace, boolean }); } - private void readMorePolicies(SystemTopicClient.Reader reader) { + /** + * This is an async method for the background reader to continue syncing new messages. + * + * Note: You should not do any blocking call here. because it will affect + * #{@link SystemTopicBasedTopicPoliciesService#getTopicPoliciesAsync(TopicName)} method to block loading topic. 
+ */ + private void readMorePoliciesAsync(SystemTopicClient.Reader reader) { reader.readNextAsync() .thenAccept(msg -> { refreshTopicPoliciesCache(msg); @@ -428,7 +492,7 @@ private void readMorePolicies(SystemTopicClient.Reader reader) { }) .whenComplete((__, ex) -> { if (ex == null) { - readMorePolicies(reader); + readMorePoliciesAsync(reader); } else { Throwable cause = FutureUtil.unwrapCompletionException(ex); if (cause instanceof PulsarClientException.AlreadyClosedException) { @@ -437,7 +501,7 @@ private void readMorePolicies(SystemTopicClient.Reader reader) { reader.getSystemTopic().getTopicName().getNamespaceObject(), false); } else { log.warn("Read more topic polices exception, read again.", ex); - readMorePolicies(reader); + readMorePoliciesAsync(reader); } } }); @@ -605,7 +669,7 @@ boolean checkReaderIsCached(NamespaceName namespaceName) { } @VisibleForTesting - public Boolean getPoliciesCacheInit(NamespaceName namespaceName) { + public CompletableFuture getPoliciesCacheInit(NamespaceName namespaceName) { return policyCacheInitMap.get(namespaceName); } @@ -643,5 +707,10 @@ protected Map>> getListeners( return listeners; } + @VisibleForTesting + protected AsyncLoadingCache> getWriterCaches() { + return writerCaches; + } + private static final Logger log = LoggerFactory.getLogger(SystemTopicBasedTopicPoliciesService.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java index 7aa50057d73c9..aea5b9fc65b46 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicListService.java @@ -31,6 +31,7 @@ import org.apache.pulsar.common.api.proto.CommandWatchTopicListClose; import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.naming.NamespaceName; +import 
org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.topics.TopicList; import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; import org.apache.pulsar.metadata.api.NotificationType; @@ -42,11 +43,16 @@ public class TopicListService { public static class TopicListWatcher implements BiConsumer { + /** Topic names which are matching, the topic name contains the partition suffix. **/ private final List matchingTopics; private final TopicListService topicListService; private final long id; + /** The regexp for the topic name(not contains partition suffix). **/ private final Pattern topicsPattern; + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). + */ public TopicListWatcher(TopicListService topicListService, long id, Pattern topicsPattern, List topics) { this.topicListService = topicListService; @@ -59,9 +65,12 @@ public List getMatchingTopics() { return matchingTopics; } + /*** + * @param topicName topic name which contains partition suffix. + */ @Override public void accept(String topicName, NotificationType notificationType) { - if (topicsPattern.matcher(topicName).matches()) { + if (topicsPattern.matcher(TopicName.get(topicName).getPartitionedTopicName()).matches()) { List newTopics; List deletedTopics; if (notificationType == NotificationType.Deleted) { @@ -109,6 +118,9 @@ public void inactivate() { } } + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). + */ public void handleWatchTopicList(NamespaceName namespaceName, long watcherId, long requestId, Pattern topicsPattern, String topicsHash, Semaphore lookupSemaphore) { @@ -184,7 +196,9 @@ public void handleWatchTopicList(NamespaceName namespaceName, long watcherId, lo }); } - + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). 
+ */ public void initializeTopicsListWatcher(CompletableFuture watcherFuture, NamespaceName namespace, long watcherId, Pattern topicsPattern) { namespaceService.getListOfPersistentTopics(namespace). @@ -246,6 +260,10 @@ public void deleteTopicListWatcher(Long watcherId) { log.info("[{}] Closed watcher, watcherId={}", connection.getRemoteAddress(), watcherId); } + /** + * @param deletedTopics topic names deleted(contains the partition suffix). + * @param newTopics topics names added(contains the partition suffix). + */ public void sendTopicListUpdate(long watcherId, String topicsHash, List deletedTopics, List newTopics) { connection.getCommandSender().sendWatchTopicListUpdate(watcherId, newTopics, deletedTopics, topicsHash); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java index c4bcc0c39353c..aa3a6aaeff29f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java @@ -22,6 +22,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import javax.annotation.Nonnull; import org.apache.pulsar.broker.service.BrokerServiceException.TopicPoliciesCacheNotInitException; import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.BackoffBuilder; @@ -31,6 +32,7 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.util.FutureUtil; +import org.jetbrains.annotations.NotNull; /** * Topic policies service. @@ -109,6 +111,32 @@ default CompletableFuture> getTopicPoliciesAsyncWithRetr return response; } + /** + * Asynchronously retrieves topic policies. 
+ * This triggers the Pulsar broker's internal client to load policies from the + * system topic `persistent://tenant/namespace/__change_event`. + * + * @param topicName The name of the topic. + * @param isGlobal Indicates if the policies are global. + * @return A CompletableFuture containing an Optional of TopicPolicies. + * @throws NullPointerException If the topicName is null. + */ + @Nonnull + CompletableFuture> getTopicPoliciesAsync(@Nonnull TopicName topicName, boolean isGlobal); + + /** + * Asynchronously retrieves topic policies. + * This triggers the Pulsar broker's internal client to load policies from the + * system topic `persistent://tenant/namespace/__change_event`. + * + * NOTE: If local policies are not available, it will fallback to using topic global policies. + * @param topicName The name of the topic. + * @return A CompletableFuture containing an Optional of TopicPolicies. + * @throws NullPointerException If the topicName is null. + */ + @Nonnull + CompletableFuture> getTopicPoliciesAsync(@Nonnull TopicName topicName); + /** * Get policies for a topic without cache async. 
* @param topicName topic name @@ -162,6 +190,19 @@ public TopicPolicies getTopicPolicies(TopicName topicName, boolean isGlobal) return null; } + @NotNull + @Override + public CompletableFuture> getTopicPoliciesAsync(@NotNull TopicName topicName, + boolean isGlobal) { + return CompletableFuture.completedFuture(Optional.empty()); + } + + @NotNull + @Override + public CompletableFuture> getTopicPoliciesAsync(@NotNull TopicName topicName) { + return CompletableFuture.completedFuture(Optional.empty()); + } + @Override public TopicPolicies getTopicPoliciesIfExists(TopicName topicName) { return null; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java index c106b1603f6bd..bfb7785a4f5c4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherMultipleConsumers.java @@ -184,7 +184,7 @@ public RedeliveryTracker getRedeliveryTracker() { } @Override - public void sendMessages(List entries) { + public synchronized void sendMessages(List entries) { Consumer consumer = TOTAL_AVAILABLE_PERMITS_UPDATER.get(this) > 0 ? 
getNextConsumer() : null; if (consumer != null) { SendMessageInfo sendMessageInfo = SendMessageInfo.getThreadLocal(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java index c764283cb4459..911ea7896b3bc 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java @@ -178,7 +178,7 @@ public CompletableFuture initialize() { isEncryptionRequired = policies.encryption_required; isAllowAutoUpdateSchema = policies.is_allow_auto_update_schema; } - updatePublishDispatcher(); + updatePublishRateLimiter(); updateResourceGroupLimiter(policies); return updateClusterMigrated(); }); @@ -634,6 +634,7 @@ CompletableFuture removeReplicator(String remoteCluster) { replicators.get(remoteCluster).disconnect().thenRun(() -> { log.info("[{}] Successfully removed replicator {}", name, remoteCluster); + replicators.remove(remoteCluster); }).exceptionally(e -> { log.error("[{}] Failed to close replication producer {} {}", topic, name, e.getMessage(), e); @@ -920,7 +921,7 @@ public CompletableFuture asyncGetStats(boolean getP }); stats.topicEpoch = topicEpoch.orElse(null); - stats.ownerBroker = brokerService.pulsar().getLookupServiceAddress(); + stats.ownerBroker = brokerService.pulsar().getBrokerId(); future.complete(stats); return future; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java index 08882982297ab..082dfed10c664 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/GeoPersistentReplicator.java @@ -123,18 +123,6 @@ protected boolean replicateEntries(List entries) { continue; } - if (msg.isExpired(messageTTLInSeconds)) { - msgExpired.recordEvent(0 /* no value stat */); - if (log.isDebugEnabled()) { - log.debug("[{}] Discarding expired message at position {}, replicateTo {}", - replicatorId, entry.getPosition(), msg.getReplicateTo()); - } - cursor.asyncDelete(entry.getPosition(), this, entry.getPosition()); - entry.release(); - msg.recycle(); - continue; - } - if (STATE_UPDATER.get(this) != State.Started || isLocalMessageSkippedOnce) { // The producer is not ready yet after having stopped/restarted. Drop the message because it will // recovered when the producer is ready @@ -149,9 +137,6 @@ protected boolean replicateEntries(List entries) { } dispatchRateLimiter.ifPresent(rateLimiter -> rateLimiter.tryDispatchPermit(1, entry.getLength())); - - msgOut.recordEvent(headersAndPayload.readableBytes()); - msg.setReplicatedFrom(localCluster); headersAndPayload.retain(); @@ -181,6 +166,7 @@ protected boolean replicateEntries(List entries) { msg.setSchemaInfoForReplicator(schemaFuture.get()); msg.getMessageBuilder().clearTxnidMostBits(); msg.getMessageBuilder().clearTxnidLeastBits(); + msgOut.recordEvent(headersAndPayload.readableBytes()); // Increment pending messages for messages produced locally PENDING_MESSAGES_UPDATER.incrementAndGet(this); producer.sendAsync(msg, ProducerSendCallback.create(this, entry, msg)); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java index ed4e70bfd2953..c4319b0f80185 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java @@ -55,6 +55,8 @@ public class MessageDeduplication { private final ManagedLedger managedLedger; private ManagedCursor managedCursor; + private static final String IS_LAST_CHUNK = "isLastChunk"; + enum Status { // Deduplication is initialized @@ -324,11 +326,12 @@ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf heade String producerName = publishContext.getProducerName(); long sequenceId = publishContext.getSequenceId(); long highestSequenceId = Math.max(publishContext.getHighestSequenceId(), sequenceId); + MessageMetadata md = null; if (producerName.startsWith(replicatorPrefix)) { // Message is coming from replication, we need to use the original producer name and sequence id // for the purpose of deduplication and not rely on the "replicator" name. int readerIndex = headersAndPayload.readerIndex(); - MessageMetadata md = Commands.parseMessageMetadata(headersAndPayload); + md = Commands.parseMessageMetadata(headersAndPayload); producerName = md.getProducerName(); sequenceId = md.getSequenceId(); highestSequenceId = Math.max(md.getHighestSequenceId(), sequenceId); @@ -337,7 +340,23 @@ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf heade publishContext.setOriginalHighestSequenceId(highestSequenceId); headersAndPayload.readerIndex(readerIndex); } - + long chunkID = -1; + long totalChunk = -1; + if (publishContext.isChunked()) { + if (md == null) { + int readerIndex = headersAndPayload.readerIndex(); + md = Commands.parseMessageMetadata(headersAndPayload); + headersAndPayload.readerIndex(readerIndex); + } + chunkID = md.getChunkId(); + totalChunk = md.getNumChunksFromMsg(); + } + // All chunks of a message use the same message metadata and sequence ID, + // so we only need to check the sequence ID for the last chunk in a chunk message. 
+ if (chunkID != -1 && chunkID != totalChunk - 1) { + publishContext.setProperty(IS_LAST_CHUNK, Boolean.FALSE); + return MessageDupStatus.NotDup; + } // Synchronize the get() and subsequent put() on the map. This would only be relevant if the producer // disconnects and re-connects very quickly. At that point the call can be coming from a different thread synchronized (highestSequencedPushed) { @@ -363,6 +382,11 @@ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf heade } highestSequencedPushed.put(producerName, highestSequenceId); } + // Only put sequence ID into highestSequencedPushed and + // highestSequencedPersisted until receive and persistent the last chunk. + if (chunkID != -1 && chunkID == totalChunk - 1) { + publishContext.setProperty(IS_LAST_CHUNK, Boolean.TRUE); + } return MessageDupStatus.NotDup; } @@ -383,8 +407,10 @@ public void recordMessagePersisted(PublishContext publishContext, PositionImpl p sequenceId = publishContext.getOriginalSequenceId(); highestSequenceId = publishContext.getOriginalHighestSequenceId(); } - - highestSequencedPersisted.put(producerName, Math.max(highestSequenceId, sequenceId)); + Boolean isLastChunk = (Boolean) publishContext.getProperty(IS_LAST_CHUNK); + if (isLastChunk == null || isLastChunk) { + highestSequencedPersisted.put(producerName, Math.max(highestSequenceId, sequenceId)); + } if (++snapshotCounter >= snapshotInterval) { snapshotCounter = 0; takeSnapshot(position); @@ -437,6 +463,10 @@ private boolean isDeduplicationEnabled() { * Topic will call this method whenever a producer connects. */ public void producerAdded(String producerName) { + if (!isEnabled()) { + return; + } + // Producer is no-longer inactive inactiveProducers.remove(producerName); } @@ -445,6 +475,10 @@ public void producerAdded(String producerName) { * Topic will call this method whenever a producer disconnects. 
*/ public void producerRemoved(String producerName) { + if (!isEnabled()) { + return; + } + // Producer is no-longer active inactiveProducers.put(producerName, System.currentTimeMillis()); } @@ -456,6 +490,14 @@ public synchronized void purgeInactiveProducers() { long minimumActiveTimestamp = System.currentTimeMillis() - TimeUnit.MINUTES .toMillis(pulsar.getConfiguration().getBrokerDeduplicationProducerInactivityTimeoutMinutes()); + // if not enabled just clear all inactive producer record. + if (!isEnabled()) { + if (!inactiveProducers.isEmpty()) { + inactiveProducers.clear(); + } + return; + } + Iterator> mapIterator = inactiveProducers.entrySet().iterator(); boolean hasInactive = false; while (mapIterator.hasNext()) { @@ -482,6 +524,10 @@ public long getLastPublishedSequenceId(String producerName) { } public void takeSnapshot() { + if (!isEnabled()) { + return; + } + Integer interval = topic.getHierarchyTopicPolicies().getDeduplicationSnapshotIntervalSeconds().get(); long currentTimeStamp = System.currentTimeMillis(); if (interval == null || interval <= 0 @@ -504,5 +550,10 @@ ManagedCursor getManagedCursor() { return managedCursor; } + @VisibleForTesting + Map getInactiveProducers() { + return inactiveProducers; + } + private static final Logger log = LoggerFactory.getLogger(MessageDeduplication.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java index d96429693fda8..15f34258768c5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java @@ -55,6 +55,8 @@ import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import 
org.apache.pulsar.common.util.Codec; import org.apache.pulsar.compaction.CompactedTopicUtils; +import org.apache.pulsar.compaction.Compactor; +import org.apache.pulsar.compaction.TopicCompactionService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -107,9 +109,9 @@ protected void scheduleReadOnActiveConsumer() { if (log.isDebugEnabled()) { log.debug("[{}] Rewind cursor and read more entries without delay", name); } - cursor.rewind(); - Consumer activeConsumer = ACTIVE_CONSUMER_UPDATER.get(this); + cursor.rewind(activeConsumer != null && activeConsumer.readCompacted()); + notifyActiveConsumerChanged(activeConsumer); readMoreEntries(activeConsumer); return; @@ -127,9 +129,9 @@ protected void scheduleReadOnActiveConsumer() { log.debug("[{}] Rewind cursor and read more entries after {} ms delay", name, serviceConfig.getActiveConsumerFailoverDelayTimeMillis()); } - cursor.rewind(); - Consumer activeConsumer = ACTIVE_CONSUMER_UPDATER.get(this); + cursor.rewind(activeConsumer != null && activeConsumer.readCompacted()); + notifyActiveConsumerChanged(activeConsumer); readMoreEntries(activeConsumer); readOnActiveConsumerTask = null; @@ -200,7 +202,7 @@ public synchronized void internalReadEntriesComplete(final List entries, log.debug("[{}] rewind because no available consumer found", name); } entries.forEach(Entry::release); - cursor.rewind(); + cursor.rewind(currentConsumer != null ? currentConsumer.readCompacted() : readConsumer.readCompacted()); if (currentConsumer != null) { notifyActiveConsumerChanged(currentConsumer); readMoreEntries(currentConsumer); @@ -295,7 +297,7 @@ private synchronized void internalRedeliverUnacknowledgedMessages(Consumer consu } cursor.cancelPendingReadRequest(); havePendingRead = false; - cursor.rewind(); + cursor.rewind(consumer.readCompacted()); if (log.isDebugEnabled()) { log.debug("[{}-{}] Cursor rewinded, redelivering unacknowledged messages. 
", name, consumer); } @@ -349,9 +351,12 @@ protected void readMoreEntries(Consumer consumer) { } havePendingRead = true; if (consumer.readCompacted()) { - boolean readFromEarliest = isFirstRead && MessageId.earliest.equals(consumer.getStartMessageId()); - CompactedTopicUtils.asyncReadCompactedEntries(topic.getTopicCompactionService(), cursor, - messagesToRead, bytesToRead, readFromEarliest, this, true, consumer); + boolean readFromEarliest = isFirstRead && MessageId.earliest.equals(consumer.getStartMessageId()) + && (!cursor.isDurable() || cursor.getName().equals(Compactor.COMPACTION_SUBSCRIPTION) + || hasValidMarkDeletePosition(cursor)); + TopicCompactionService topicCompactionService = topic.getTopicCompactionService(); + CompactedTopicUtils.asyncReadCompactedEntries(topicCompactionService, cursor, messagesToRead, + bytesToRead, topic.getMaxReadPosition(), readFromEarliest, this, true, consumer); } else { ReadEntriesCtx readEntriesCtx = ReadEntriesCtx.create(consumer, consumer.getConsumerEpoch()); @@ -366,6 +371,13 @@ protected void readMoreEntries(Consumer consumer) { } } + private boolean hasValidMarkDeletePosition(ManagedCursor cursor) { + // If `markDeletedPosition.entryID == -1L` then the md-position is an invalid position, + // since the initial md-position of the consumer will be set to it. 
+ // See ManagedLedgerImpl#asyncOpenCursor and ManagedLedgerImpl#getFirstPosition + return cursor.getMarkDeletedPosition() != null && cursor.getMarkDeletedPosition().getEntryId() == -1L; + } + @Override protected void reScheduleRead() { if (isRescheduleReadInProgress.compareAndSet(false, true)) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java index 020dc5323e55b..ac391c1050340 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentMessageExpiryMonitor.java @@ -18,8 +18,10 @@ */ package org.apache.pulsar.broker.service.persistent; +import com.google.common.annotations.VisibleForTesting; import java.util.Objects; import java.util.Optional; +import java.util.SortedMap; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.LongAdder; import javax.annotation.Nullable; @@ -30,8 +32,10 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException.LedgerNotExistException; import org.apache.bookkeeper.mledger.ManagedLedgerException.NonRecoverableLedgerException; import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.bookkeeper.mledger.proto.MLDataFormats; import org.apache.pulsar.broker.service.MessageExpirer; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; @@ -48,7 +52,6 @@ public class PersistentMessageExpiryMonitor implements FindEntryCallback, Messag private final String topicName; private final Rate msgExpired; private final LongAdder 
totalMsgExpired; - private final boolean autoSkipNonRecoverableData; private final PersistentSubscription subscription; private static final int FALSE = 0; @@ -68,8 +71,12 @@ public PersistentMessageExpiryMonitor(PersistentTopic topic, String subscription this.subscription = subscription; this.msgExpired = new Rate(); this.totalMsgExpired = new LongAdder(); + } + + @VisibleForTesting + public boolean isAutoSkipNonRecoverableData() { // check to avoid test failures - this.autoSkipNonRecoverableData = this.cursor.getManagedLedger() != null + return this.cursor.getManagedLedger() != null && this.cursor.getManagedLedger().getConfig().isAutoSkipNonRecoverableData(); } @@ -78,7 +85,9 @@ public boolean expireMessages(int messageTTLInSeconds) { if (expirationCheckInProgressUpdater.compareAndSet(this, FALSE, TRUE)) { log.info("[{}][{}] Starting message expiry check, ttl= {} seconds", topicName, subName, messageTTLInSeconds); - + // First filter the entire Ledger reached TTL based on the Ledger closing time to avoid client clock skew + checkExpiryByLedgerClosureTime(cursor, messageTTLInSeconds); + // Some part of entries in active Ledger may have reached TTL, so we need to continue searching. 
cursor.asyncFindNewestMatching(ManagedCursor.FindPositionConstraint.SearchActiveEntries, entry -> { try { long entryTimestamp = Commands.getEntryTimestamp(entry.getDataBuffer()); @@ -100,6 +109,35 @@ public boolean expireMessages(int messageTTLInSeconds) { } } + private void checkExpiryByLedgerClosureTime(ManagedCursor cursor, int messageTTLInSeconds) { + if (messageTTLInSeconds <= 0) { + return; + } + if (cursor instanceof ManagedCursorImpl managedCursor) { + ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) managedCursor.getManagedLedger(); + Position deletedPosition = managedCursor.getMarkDeletedPosition(); + SortedMap ledgerInfoSortedMap = + managedLedger.getLedgersInfo().subMap(deletedPosition.getLedgerId(), true, + managedLedger.getLedgersInfo().lastKey(), true); + MLDataFormats.ManagedLedgerInfo.LedgerInfo info = null; + for (MLDataFormats.ManagedLedgerInfo.LedgerInfo ledgerInfo : ledgerInfoSortedMap.values()) { + if (!ledgerInfo.hasTimestamp() || !MessageImpl.isEntryExpired(messageTTLInSeconds, + ledgerInfo.getTimestamp())) { + break; + } + info = ledgerInfo; + } + if (info != null && info.getLedgerId() > -1) { + PositionImpl position = PositionImpl.get(info.getLedgerId(), info.getEntries() - 1); + if (((PositionImpl) managedLedger.getLastConfirmedEntry()).compareTo(position) < 0) { + findEntryComplete(managedLedger.getLastConfirmedEntry(), null); + } else { + findEntryComplete(position, null); + } + } + } + } + @Override public boolean expireMessages(Position messagePosition) { // If it's beyond last position of this topic, do nothing. 
@@ -177,7 +215,8 @@ public void findEntryComplete(Position position, Object ctx) { if (position != null) { log.info("[{}][{}] Expiring all messages until position {}", topicName, subName, position); Position prevMarkDeletePos = cursor.getMarkDeletedPosition(); - cursor.asyncMarkDelete(position, markDeleteCallback, cursor.getNumberOfEntriesInBacklog(false)); + cursor.asyncMarkDelete(position, cursor.getProperties(), markDeleteCallback, + cursor.getNumberOfEntriesInBacklog(false)); if (!Objects.equals(cursor.getMarkDeletedPosition(), prevMarkDeletePos) && subscription != null) { subscription.updateLastMarkDeleteAdvancedTimestamp(); } @@ -195,7 +234,7 @@ public void findEntryFailed(ManagedLedgerException exception, Optional if (log.isDebugEnabled()) { log.debug("[{}][{}] Finding expired entry operation failed", topicName, subName, exception); } - if (autoSkipNonRecoverableData && failedReadPosition.isPresent() + if (isAutoSkipNonRecoverableData() && failedReadPosition.isPresent() && (exception instanceof NonRecoverableLedgerException)) { log.warn("[{}][{}] read failed from ledger at position:{} : {}", topicName, subName, failedReadPosition, exception.getMessage()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java index 1b8b47da78fed..b5852610c20cd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java @@ -29,6 +29,7 @@ import java.util.Map; import java.util.Optional; import java.util.TreeMap; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; @@ -199,7 +200,12 @@ public boolean setReplicated(boolean replicated) { if 
(this.cursor != null) { if (replicated) { - return this.cursor.putProperty(REPLICATED_SUBSCRIPTION_PROPERTY, 1L); + if (!config.isEnableReplicatedSubscriptions()) { + log.warn("[{}][{}] Failed set replicated subscription status to {}, please enable the " + + "configuration enableReplicatedSubscriptions", topicName, subName, replicated); + } else { + return this.cursor.putProperty(REPLICATED_SUBSCRIPTION_PROPERTY, 1L); + } } else { return this.cursor.removeProperty(REPLICATED_SUBSCRIPTION_PROPERTY); } @@ -506,9 +512,15 @@ public String getTypeString() { return "Null"; } - @Override public CompletableFuture analyzeBacklog(Optional position) { - + final ManagedLedger managedLedger = topic.getManagedLedger(); + final String newNonDurableCursorName = "analyze-backlog-" + UUID.randomUUID(); + ManagedCursor newNonDurableCursor; + try { + newNonDurableCursor = ((ManagedCursorImpl) cursor).duplicateNonDurableCursor(newNonDurableCursorName); + } catch (ManagedLedgerException e) { + return CompletableFuture.failedFuture(e); + } long start = System.currentTimeMillis(); if (log.isDebugEnabled()) { log.debug("[{}][{}] Starting to analyze backlog", topicName, subName); @@ -523,7 +535,7 @@ public CompletableFuture analyzeBacklog(Optional AtomicLong rejectedMessages = new AtomicLong(); AtomicLong rescheduledMessages = new AtomicLong(); - Position currentPosition = cursor.getMarkDeletedPosition(); + Position currentPosition = newNonDurableCursor.getMarkDeletedPosition(); if (log.isDebugEnabled()) { log.debug("[{}][{}] currentPosition {}", @@ -583,7 +595,7 @@ public CompletableFuture analyzeBacklog(Optional return true; }; - return cursor.scan( + CompletableFuture res = newNonDurableCursor.scan( position, condition, batchSize, @@ -610,7 +622,22 @@ public CompletableFuture analyzeBacklog(Optional topicName, subName, end - start, result); return result; }); + res.whenComplete((__, ex) -> { + managedLedger.asyncDeleteCursor(newNonDurableCursorName, + new 
AsyncCallbacks.DeleteCursorCallback(){ + @Override + public void deleteCursorComplete(Object ctx) { + // Nothing to do. + } + @Override + public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) { + log.warn("[{}][{}] Delete non-durable cursor[{}] failed when analyze backlog.", + topicName, subName, newNonDurableCursor.getName()); + } + }, null); + }); + return res; } @Override @@ -1160,16 +1187,20 @@ public SubscriptionStatsImpl getStats(Boolean getPreciseBacklog, boolean subscri } else { subStats.backlogSize = -1; } - if (getEarliestTimeInBacklog && subStats.msgBacklog > 0) { - ManagedLedgerImpl managedLedger = ((ManagedLedgerImpl) cursor.getManagedLedger()); - PositionImpl markDeletedPosition = (PositionImpl) cursor.getMarkDeletedPosition(); - long result = 0; - try { - result = managedLedger.getEarliestMessagePublishTimeOfPos(markDeletedPosition).get(); - } catch (InterruptedException | ExecutionException e) { - result = -1; + if (getEarliestTimeInBacklog) { + if (subStats.msgBacklog > 0) { + ManagedLedgerImpl managedLedger = ((ManagedLedgerImpl) cursor.getManagedLedger()); + PositionImpl markDeletedPosition = (PositionImpl) cursor.getMarkDeletedPosition(); + long result = 0; + try { + result = managedLedger.getEarliestMessagePublishTimeOfPos(markDeletedPosition).get(); + } catch (InterruptedException | ExecutionException e) { + result = -1; + } + subStats.earliestMsgPublishTimeInBacklog = result; + } else { + subStats.earliestMsgPublishTimeInBacklog = -1; } - subStats.earliestMsgPublishTimeInBacklog = result; } subStats.msgBacklogNoDelayed = subStats.msgBacklog - subStats.msgDelayed; subStats.msgRateExpired = expiryMonitor.getMessageExpiryRate(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index 2b1700f36cb68..2752f24785324 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -162,6 +162,7 @@ import org.apache.pulsar.common.policies.data.stats.TopicStatsImpl; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.schema.SchemaData; +import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.schema.SchemaType; import org.apache.pulsar.common.topics.TopicCompactionStrategy; import org.apache.pulsar.common.util.Codec; @@ -213,7 +214,8 @@ public class PersistentTopic extends AbstractTopic implements Topic, AddEntryCal protected final MessageDeduplication messageDeduplication; private static final Long COMPACTION_NEVER_RUN = -0xfebecffeL; - private CompletableFuture currentCompaction = CompletableFuture.completedFuture(COMPACTION_NEVER_RUN); + private volatile CompletableFuture currentCompaction = CompletableFuture.completedFuture( + COMPACTION_NEVER_RUN); private TopicCompactionService topicCompactionService; // TODO: Create compaction strategy from topic policy when exposing strategic compaction to users. 
@@ -296,8 +298,6 @@ public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerS .build(); this.backloggedCursorThresholdEntries = brokerService.pulsar().getConfiguration().getManagedLedgerCursorBackloggedThreshold(); - registerTopicPolicyListener(); - this.messageDeduplication = new MessageDeduplication(brokerService.pulsar(), this, ledger); if (ledger.getProperties().containsKey(TOPIC_EPOCH_PROPERTY_NAME)) { topicEpoch = Optional.of(Long.parseLong(ledger.getProperties().get(TOPIC_EPOCH_PROPERTY_NAME))); @@ -305,11 +305,12 @@ public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerS TopicName topicName = TopicName.get(topic); if (brokerService.getPulsar().getConfiguration().isTransactionCoordinatorEnabled() - && !isEventSystemTopic(topicName)) { + && !isEventSystemTopic(topicName) + && !NamespaceService.isHeartbeatNamespace(topicName.getNamespaceObject())) { this.transactionBuffer = brokerService.getPulsar() .getTransactionBufferProvider().newTransactionBuffer(this); } else { - this.transactionBuffer = new TransactionBufferDisable(); + this.transactionBuffer = new TransactionBufferDisable(this); } transactionBuffer.syncMaxReadPositionForNormalPublish((PositionImpl) ledger.getLastConfirmedEntry()); if (ledger instanceof ShadowManagedLedgerImpl) { @@ -340,7 +341,7 @@ public CompletableFuture initialize() { .thenAcceptAsync(optPolicies -> { if (!optPolicies.isPresent()) { isEncryptionRequired = false; - updatePublishDispatcher(); + updatePublishRateLimiter(); updateResourceGroupLimiter(new Policies()); initializeDispatchRateLimiterIfNeeded(); updateSubscribeRateLimiter(); @@ -355,7 +356,7 @@ public CompletableFuture initialize() { updateSubscribeRateLimiter(); - updatePublishDispatcher(); + updatePublishRateLimiter(); updateResourceGroupLimiter(policies); @@ -402,7 +403,7 @@ public CompletableFuture initialize() { this.transactionBuffer = brokerService.getPulsar() .getTransactionBufferProvider().newTransactionBuffer(this); } 
else { - this.transactionBuffer = new TransactionBufferDisable(); + this.transactionBuffer = new TransactionBufferDisable(this); } shadowSourceTopic = null; } @@ -498,7 +499,7 @@ private PersistentSubscription createPersistentSubscription(String subscriptionN } } - private static boolean isCompactionSubscription(String subscriptionName) { + public static boolean isCompactionSubscription(String subscriptionName) { return COMPACTION_SUBSCRIPTION.equals(subscriptionName); } @@ -905,8 +906,8 @@ private CompletableFuture internalSubscribe(final TransportCnx cnx, St consumer.close(); } catch (BrokerServiceException e) { if (e instanceof ConsumerBusyException) { - log.warn("[{}][{}] Consumer {} {} already connected", - topic, subscriptionName, consumerId, consumerName); + log.warn("[{}][{}] Consumer {} {} already connected: {}", + topic, subscriptionName, consumerId, consumerName, e.getMessage()); } else if (e instanceof SubscriptionBusyException) { log.warn("[{}][{}] {}", topic, subscriptionName, e.getMessage()); } @@ -936,8 +937,8 @@ private CompletableFuture internalSubscribe(final TransportCnx cnx, St decrementUsageCount(); if (ex.getCause() instanceof ConsumerBusyException) { - log.warn("[{}][{}] Consumer {} {} already connected", topic, subscriptionName, consumerId, - consumerName); + log.warn("[{}][{}] Consumer {} {} already connected: {}", topic, subscriptionName, consumerId, + consumerName, ex.getCause().getMessage()); Consumer consumer = null; try { consumer = subscriptionFuture.isDone() ? 
getActiveConsumer(subscriptionFuture.get()) : null; @@ -977,7 +978,9 @@ public CompletableFuture subscribe(final TransportCnx cnx, String subs } private CompletableFuture getDurableSubscription(String subscriptionName, - InitialPosition initialPosition, long startMessageRollbackDurationSec, boolean replicated, + InitialPosition initialPosition, + long startMessageRollbackDurationSec, + boolean replicated, Map subscriptionProperties) { CompletableFuture subscriptionFuture = new CompletableFuture<>(); if (checkMaxSubscriptionsPerTopicExceed(subscriptionName)) { @@ -987,7 +990,6 @@ private CompletableFuture getDurableSubscription(String subscripti } Map properties = PersistentSubscription.getBaseCursorProperties(replicated); - ledger.asyncOpenCursor(Codec.encode(subscriptionName), initialPosition, properties, subscriptionProperties, new OpenCursorCallback() { @Override @@ -1165,16 +1167,16 @@ public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { private void asyncDeleteCursorWithClearDelayedMessage(String subscriptionName, CompletableFuture unsubscribeFuture) { - if (!isDelayedDeliveryEnabled() - || !(brokerService.getDelayedDeliveryTrackerFactory() instanceof BucketDelayedDeliveryTrackerFactory)) { - asyncDeleteCursor(subscriptionName, unsubscribeFuture); + PersistentSubscription persistentSubscription = subscriptions.get(subscriptionName); + if (persistentSubscription == null) { + log.warn("[{}][{}] Can't find subscription, skip delete cursor", topic, subscriptionName); + unsubscribeFuture.complete(null); return; } - PersistentSubscription persistentSubscription = subscriptions.get(subscriptionName); - if (persistentSubscription == null) { - log.warn("[{}][{}] Can't find subscription, skip clear delayed message", topic, subscriptionName); - asyncDeleteCursor(subscriptionName, unsubscribeFuture); + if (!isDelayedDeliveryEnabled() + || !(brokerService.getDelayedDeliveryTrackerFactory() instanceof BucketDelayedDeliveryTrackerFactory)) { + 
asyncDeleteCursorWithCleanCompactionLedger(persistentSubscription, unsubscribeFuture); return; } @@ -1189,7 +1191,7 @@ private void asyncDeleteCursorWithClearDelayedMessage(String subscriptionName, if (ex != null) { unsubscribeFuture.completeExceptionally(ex); } else { - asyncDeleteCursor(subscriptionName, unsubscribeFuture); + asyncDeleteCursorWithCleanCompactionLedger(persistentSubscription, unsubscribeFuture); } }); } @@ -1199,6 +1201,29 @@ private void asyncDeleteCursorWithClearDelayedMessage(String subscriptionName, dispatcher.clearDelayedMessages().whenComplete((__, ex) -> { if (ex != null) { unsubscribeFuture.completeExceptionally(ex); + } else { + asyncDeleteCursorWithCleanCompactionLedger(persistentSubscription, unsubscribeFuture); + } + }); + } + + private void asyncDeleteCursorWithCleanCompactionLedger(PersistentSubscription subscription, + CompletableFuture unsubscribeFuture) { + final String subscriptionName = subscription.getName(); + if ((!isCompactionSubscription(subscriptionName)) || !(subscription instanceof PulsarCompactorSubscription)) { + asyncDeleteCursor(subscriptionName, unsubscribeFuture); + return; + } + + currentCompaction.handle((__, e) -> { + if (e != null) { + log.warn("[{}][{}] Last compaction task failed", topic, subscriptionName); + } + return ((PulsarCompactorSubscription) subscription).cleanCompactedLedger(); + }).whenComplete((__, ex) -> { + if (ex != null) { + log.error("[{}][{}] Error cleaning compacted ledger", topic, subscriptionName, ex); + unsubscribeFuture.completeExceptionally(ex); } else { asyncDeleteCursor(subscriptionName, unsubscribeFuture); } @@ -1547,7 +1572,6 @@ private void disposeTopic(CompletableFuture closeFuture) { CompletableFuture checkReplicationAndRetryOnFailure() { CompletableFuture result = new CompletableFuture(); checkReplication().thenAccept(res -> { - log.info("[{}] Policies updated successfully", topic); result.complete(null); }).exceptionally(th -> { log.error("[{}] Policies update failed {}, 
scheduled retry in {} seconds", topic, th.getMessage(), @@ -1567,7 +1591,8 @@ public CompletableFuture checkDeduplicationStatus() { return messageDeduplication.checkStatus(); } - private CompletableFuture checkPersistencePolicies() { + @VisibleForTesting + CompletableFuture checkPersistencePolicies() { TopicName topicName = TopicName.get(topic); CompletableFuture future = new CompletableFuture<>(); brokerService.getManagedLedgerConfig(topicName).thenAccept(config -> { @@ -1594,6 +1619,11 @@ public CompletableFuture checkReplication() { } List configuredClusters = topicPolicies.getReplicationClusters().get(); + if (CollectionUtils.isEmpty(configuredClusters)) { + log.warn("[{}] No replication clusters configured", name); + return CompletableFuture.completedFuture(null); + } + int newMessageTTLInSeconds = topicPolicies.getMessageTTLInSeconds().get(); String localCluster = brokerService.pulsar().getConfiguration().getClusterName(); @@ -1668,11 +1698,11 @@ private CompletableFuture checkShadowReplication() { public void checkMessageExpiry() { int messageTtlInSeconds = topicPolicies.getMessageTTLInSeconds().get(); if (messageTtlInSeconds != 0) { - subscriptions.forEach((__, sub) -> sub.expireMessages(messageTtlInSeconds)); - replicators.forEach((__, replicator) - -> ((PersistentReplicator) replicator).expireMessages(messageTtlInSeconds)); - shadowReplicators.forEach((__, replicator) - -> ((PersistentReplicator) replicator).expireMessages(messageTtlInSeconds)); + subscriptions.forEach((__, sub) -> { + if (!isCompactionSubscription(sub.getName())) { + sub.expireMessages(messageTtlInSeconds); + } + }); } } @@ -2305,7 +2335,7 @@ public CompletableFuture asyncGetStats(boolean getPreciseBacklog stats.backlogSize = ledger.getEstimatedBacklogSize(); stats.deduplicationStatus = messageDeduplication.getStatus().toString(); stats.topicEpoch = topicEpoch.orElse(null); - stats.ownerBroker = brokerService.pulsar().getLookupServiceAddress(); + stats.ownerBroker = 
brokerService.pulsar().getBrokerId(); stats.offloadedStorageSize = ledger.getOffloadedSize(); stats.lastOffloadLedgerId = ledger.getLastOffloadedLedgerId(); stats.lastOffloadSuccessTimeStamp = ledger.getLastOffloadedSuccessTimestamp(); @@ -2344,6 +2374,15 @@ private Optional getCompactorMXBean() { return Optional.ofNullable(compactor).map(c -> c.getStats()); } + @Override + public CompletableFuture deleteSchema() { + if (TopicName.get(getName()).isPartitioned()) { + // Only delete schema when partitioned metadata is deleting. + return CompletableFuture.completedFuture(null); + } + return brokerService.deleteSchema(TopicName.get(getName())); + } + @Override public CompletableFuture getInternalStats(boolean includeLedgerMetadata) { @@ -2649,7 +2688,7 @@ public void checkGC() { replCloseFuture.thenCompose(v -> delete(deleteMode == InactiveTopicDeleteMode.delete_when_no_subscriptions, deleteMode == InactiveTopicDeleteMode.delete_when_subscriptions_caught_up, false)) - .thenApply((res) -> tryToDeletePartitionedMetadata()) + .thenCompose((res) -> tryToDeletePartitionedMetadata()) .thenRun(() -> log.info("[{}] Topic deleted successfully due to inactivity", topic)) .exceptionally(e -> { if (e.getCause() instanceof TopicBusyException) { @@ -2657,6 +2696,8 @@ public void checkGC() { if (log.isDebugEnabled()) { log.debug("[{}] Did not delete busy topic: {}", topic, e.getCause().getMessage()); } + } else if (e.getCause() instanceof UnsupportedOperationException) { + log.info("[{}] Skip to delete partitioned topic: {}", topic, e.getCause().getMessage()); } else { log.warn("[{}] Inactive topic deletion failed", topic, e); } @@ -2701,7 +2742,7 @@ private CompletableFuture tryToDeletePartitionedMetadata() { .filter(topicExist -> topicExist) .findAny(); if (anyExistPartition.isPresent()) { - log.error("[{}] Delete topic metadata failed because" + log.info("[{}] Delete topic metadata failed because" + " another partition exist.", topicName); throw new UnsupportedOperationException( 
String.format("Another partition exists for [%s].", @@ -2731,7 +2772,9 @@ public void checkInactiveSubscriptions() { .toMillis(nsExpirationTime == null ? defaultExpirationTime : nsExpirationTime); if (expirationTimeMillis > 0) { subscriptions.forEach((subName, sub) -> { - if (sub.dispatcher != null && sub.dispatcher.isConsumerConnected() || sub.isReplicated()) { + if (sub.dispatcher != null && sub.dispatcher.isConsumerConnected() + || sub.isReplicated() + || isCompactionSubscription(subName)) { return; } if (System.currentTimeMillis() - sub.cursor.getLastActive() > expirationTimeMillis) { @@ -2816,39 +2859,60 @@ public CompletableFuture onPoliciesUpdate(@Nonnull Policies data) { return CompletableFuture.completedFuture(null); } + // Update props. + // The component "EntryFilters" is update in the method "updateTopicPolicyByNamespacePolicy(data)". + // see more detail: https://github.com/apache/pulsar/pull/19364. updateTopicPolicyByNamespacePolicy(data); - + checkReplicatedSubscriptionControllerState(); isEncryptionRequired = data.encryption_required; - isAllowAutoUpdateSchema = data.is_allow_auto_update_schema; - updateDispatchRateLimiter(); - - updateSubscribeRateLimiter(); + // Apply policies for components. 
+ List> applyPolicyTasks = applyUpdatedTopicPolicies(); + applyPolicyTasks.add(applyUpdatedNamespacePolicies(data)); + return FutureUtil.waitForAll(applyPolicyTasks) + .thenAccept(__ -> log.info("[{}] namespace-level policies updated successfully", topic)) + .exceptionally(ex -> { + log.error("[{}] update namespace polices : {} error", this.getName(), data, ex); + throw FutureUtil.wrapToCompletionException(ex); + }); + } - updatePublishDispatcher(); + private CompletableFuture applyUpdatedNamespacePolicies(Policies namespaceLevelPolicies) { + return FutureUtil.runWithCurrentThread(() -> updateResourceGroupLimiter(namespaceLevelPolicies)); + } - updateResourceGroupLimiter(data); + private List> applyUpdatedTopicPolicies() { + List> applyPoliciesFutureList = new ArrayList<>(); - List> producerCheckFutures = new ArrayList<>(producers.size()); - producers.values().forEach(producer -> producerCheckFutures.add( + // Client permission check. + subscriptions.forEach((subName, sub) -> { + sub.getConsumers().forEach(consumer -> applyPoliciesFutureList.add(consumer.checkPermissionsAsync())); + }); + producers.values().forEach(producer -> applyPoliciesFutureList.add( producer.checkPermissionsAsync().thenRun(producer::checkEncryption))); + // Check message expiry. 
+ applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkMessageExpiry())); - return FutureUtil.waitForAll(producerCheckFutures).thenCompose((__) -> { - return updateSubscriptionsDispatcherRateLimiter().thenCompose((___) -> { - replicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - shadowReplicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - checkMessageExpiry(); - CompletableFuture replicationFuture = checkReplicationAndRetryOnFailure(); - CompletableFuture dedupFuture = checkDeduplicationStatus(); - CompletableFuture persistentPoliciesFuture = checkPersistencePolicies(); - return CompletableFuture.allOf(replicationFuture, dedupFuture, persistentPoliciesFuture, - preCreateSubscriptionForCompactionIfNeeded()); - }); - }).exceptionally(ex -> { - log.error("[{}] update namespace polices : {} error", this.getName(), data, ex); - throw FutureUtil.wrapToCompletionException(ex); - }); + // Update rate limiters. + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updateDispatchRateLimiter())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updateSubscribeRateLimiter())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updatePublishRateLimiter())); + + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> updateSubscriptionsDispatcherRateLimiter())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread( + () -> replicators.forEach((name, replicator) -> replicator.updateRateLimiter()))); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread( + () -> shadowReplicators.forEach((name, replicator) -> replicator.updateRateLimiter()))); + + // Other components. 
+ applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkReplicationAndRetryOnFailure())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkDeduplicationStatus())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread(() -> checkPersistencePolicies())); + applyPoliciesFutureList.add(FutureUtil.runWithCurrentThread( + () -> preCreateSubscriptionForCompactionIfNeeded())); + + return applyPoliciesFutureList; } /** @@ -3182,17 +3246,29 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { public synchronized void triggerCompaction() throws PulsarServerException, AlreadyRunningException { if (currentCompaction.isDone()) { + if (!lock.readLock().tryLock()) { + log.info("[{}] Conflict topic-close, topic-delete, skip triggering compaction", topic); + return; + } + try { + if (isClosingOrDeleting) { + log.info("[{}] Topic is closing or deleting, skip triggering compaction", topic); + return; + } - if (strategicCompactionMap.containsKey(topic)) { - currentCompaction = brokerService.pulsar().getStrategicCompactor() - .compact(topic, strategicCompactionMap.get(topic)); - } else { - currentCompaction = topicCompactionService.compact().thenApply(x -> null); + if (strategicCompactionMap.containsKey(topic)) { + currentCompaction = brokerService.pulsar().getStrategicCompactor() + .compact(topic, strategicCompactionMap.get(topic)); + } else { + currentCompaction = topicCompactionService.compact().thenApply(x -> null); + } + } finally { + lock.readLock().unlock(); } currentCompaction.whenComplete((ignore, ex) -> { - if (ex != null){ - log.warn("[{}] Compaction failure.", topic, ex); - } + if (ex != null) { + log.warn("[{}] Compaction failure.", topic, ex); + } }); } else { throw new AlreadyRunningException("Compaction already in progress"); @@ -3304,12 +3380,14 @@ private synchronized void checkReplicatedSubscriptionControllerState(boolean sho boolean isCurrentlyEnabled = 
replicatedSubscriptionsController.isPresent(); boolean isEnableReplicatedSubscriptions = brokerService.pulsar().getConfiguration().isEnableReplicatedSubscriptions(); + boolean replicationEnabled = this.topicPolicies.getReplicationClusters().get().size() > 1; - if (shouldBeEnabled && !isCurrentlyEnabled && isEnableReplicatedSubscriptions) { + if (shouldBeEnabled && !isCurrentlyEnabled && isEnableReplicatedSubscriptions && replicationEnabled) { log.info("[{}] Enabling replicated subscriptions controller", topic); replicatedSubscriptionsController = Optional.of(new ReplicatedSubscriptionsController(this, brokerService.pulsar().getConfiguration().getClusterName())); - } else if (isCurrentlyEnabled && !shouldBeEnabled || !isEnableReplicatedSubscriptions) { + } else if (isCurrentlyEnabled && !shouldBeEnabled || !isEnableReplicatedSubscriptions + || !replicationEnabled) { log.info("[{}] Disabled replicated subscriptions controller", topic); replicatedSubscriptionsController.ifPresent(ReplicatedSubscriptionsController::close); replicatedSubscriptionsController = Optional.empty(); @@ -3489,48 +3567,37 @@ public void onUpdate(TopicPolicies policies) { if (policies == null) { return; } + // Update props. + // The component "EntryFilters" is update in the method "updateTopicPolicy(data)". + // see more detail: https://github.com/apache/pulsar/pull/19364. 
updateTopicPolicy(policies); shadowTopics = policies.getShadowTopics(); - updateDispatchRateLimiter(); - updateSubscriptionsDispatcherRateLimiter().thenRun(() -> { - updatePublishDispatcher(); - updateSubscribeRateLimiter(); - replicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - shadowReplicators.forEach((name, replicator) -> replicator.updateRateLimiter()); - checkMessageExpiry(); - checkReplicationAndRetryOnFailure(); - - checkDeduplicationStatus(); - - preCreateSubscriptionForCompactionIfNeeded(); + checkReplicatedSubscriptionControllerState(); - // update managed ledger config - checkPersistencePolicies(); - }).exceptionally(e -> { - Throwable t = e instanceof CompletionException ? e.getCause() : e; - log.error("[{}] update topic policy error: {}", topic, t.getMessage(), t); - return null; - }); + // Apply policies for components(not contains the specified policies which only defined in namespace policies). + FutureUtil.waitForAll(applyUpdatedTopicPolicies()) + .thenAccept(__ -> log.info("[{}] topic-level policies updated successfully", topic)) + .exceptionally(e -> { + Throwable t = FutureUtil.unwrapCompletionException(e); + log.error("[{}] update topic-level policy error: {}", topic, t.getMessage(), t); + return null; + }); } - private CompletableFuture updateSubscriptionsDispatcherRateLimiter() { - List> subscriptionCheckFutures = new ArrayList<>((int) subscriptions.size()); + private void updateSubscriptionsDispatcherRateLimiter() { subscriptions.forEach((subName, sub) -> { - List> consumerCheckFutures = new ArrayList<>(sub.getConsumers().size()); - sub.getConsumers().forEach(consumer -> consumerCheckFutures.add(consumer.checkPermissionsAsync())); - subscriptionCheckFutures.add(FutureUtil.waitForAll(consumerCheckFutures).thenRun(() -> { - Dispatcher dispatcher = sub.getDispatcher(); - if (dispatcher != null) { - dispatcher.updateRateLimiter(); - } - })); + Dispatcher dispatcher = sub.getDispatcher(); + if (dispatcher != null) { + 
dispatcher.updateRateLimiter(); + } }); - return FutureUtil.waitForAll(subscriptionCheckFutures); } protected CompletableFuture initTopicPolicy() { if (brokerService.pulsar().getConfig().isSystemTopicEnabled() && brokerService.pulsar().getConfig().isTopicLevelPoliciesEnabled()) { + brokerService.getPulsar().getTopicPoliciesService() + .registerListener(TopicName.getPartitionedTopicName(topic), this); return CompletableFuture.completedFuture(null).thenRunAsync(() -> onUpdate( brokerService.getPulsar().getTopicPoliciesService() .getTopicPoliciesIfExists(TopicName.getPartitionedTopicName(topic))), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PulsarCompactorSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PulsarCompactorSubscription.java index dbb09f6ac39fd..fe13aeb572e2e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PulsarCompactorSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PulsarCompactorSubscription.java @@ -22,12 +22,15 @@ import static org.apache.pulsar.broker.service.AbstractBaseDispatcher.checkAndApplyReachedEndOfTopicOrTopicMigration; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; import org.apache.pulsar.common.api.proto.CommandAck.AckType; import org.apache.pulsar.compaction.CompactedTopic; +import org.apache.pulsar.compaction.CompactedTopicContext; +import org.apache.pulsar.compaction.CompactedTopicImpl; import org.apache.pulsar.compaction.Compactor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -106,5 +109,19 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { } } + 
CompletableFuture cleanCompactedLedger() { + final CompletableFuture compactedTopicContextFuture = + ((CompactedTopicImpl) compactedTopic).getCompactedTopicContextFuture(); + if (compactedTopicContextFuture != null) { + return compactedTopicContextFuture.thenCompose(context -> { + long compactedLedgerId = context.getLedger().getId(); + ((CompactedTopicImpl) compactedTopic).reset(); + return compactedTopic.deleteCompactedLedger(compactedLedgerId); + }); + } else { + return CompletableFuture.completedFuture(null); + } + } + private static final Logger log = LoggerFactory.getLogger(PulsarCompactorSubscription.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java index fb306348bcdbb..b48f748bf5dbf 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ShadowReplicator.java @@ -76,18 +76,6 @@ protected boolean replicateEntries(List entries) { continue; } - if (msg.isExpired(messageTTLInSeconds)) { - msgExpired.recordEvent(0 /* no value stat */); - if (log.isDebugEnabled()) { - log.debug("[{}] Discarding expired message at position {}, replicateTo {}", - replicatorId, entry.getPosition(), msg.getReplicateTo()); - } - cursor.asyncDelete(entry.getPosition(), this, entry.getPosition()); - entry.release(); - msg.recycle(); - continue; - } - if (STATE_UPDATER.get(this) != State.Started || isLocalMessageSkippedOnce) { // The producer is not ready yet after having stopped/restarted. 
Drop the message because it will // recovered when the producer is ready diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java index 78e30f6fff827..c509764bf6710 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java @@ -707,7 +707,8 @@ public static Exception bkException(String operation, int rc, long ledgerId, lon message += " - entry=" + entryId; } boolean recoverable = rc != BKException.Code.NoSuchLedgerExistsException - && rc != BKException.Code.NoSuchEntryException; + && rc != BKException.Code.NoSuchEntryException + && rc != BKException.Code.NoSuchLedgerExistsOnMetadataServerException; return new SchemaException(recoverable, message); } @@ -716,7 +717,8 @@ public static CompletableFuture ignoreUnrecoverableBKException(Completabl if (t.getCause() != null && (t.getCause() instanceof SchemaException) && !((SchemaException) t.getCause()).isRecoverable()) { - // Meeting NoSuchLedgerExistsException or NoSuchEntryException when reading schemas in + // Meeting NoSuchLedgerExistsException, NoSuchEntryException or + // NoSuchLedgerExistsOnMetadataServerException when reading schemas in // bookkeeper. This also means that the data has already been deleted by other operations // in deleting schema. 
if (log.isDebugEnabled()) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java index 1b6f981ca4e21..54965e4c783d8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/DimensionStats.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.stats; +import static com.google.common.base.Preconditions.checkArgument; import static io.prometheus.client.CollectorRegistry.defaultRegistry; import io.prometheus.client.Collector; import io.prometheus.client.Summary; @@ -70,6 +71,7 @@ public DimensionStats(String name, long updateDurationInSec) { } public void recordDimensionTimeValue(long latency, TimeUnit unit) { + checkArgument(latency >= 0); summary.observe(unit.toMillis(latency)); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java index 9fe5588044d2f..d0dc4fe2a7e7d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java @@ -34,7 +34,7 @@ public class AggregatedNamespaceStats { public double throughputIn; public double throughputOut; - public long messageAckRate; + public double messageAckRate; public long bytesInCounter; public long msgInCounter; public long bytesOutCounter; @@ -64,7 +64,7 @@ public class AggregatedNamespaceStats { long compactionCompactedEntriesCount; long compactionCompactedEntriesSize; StatsBuckets compactionLatencyBuckets = new StatsBuckets(CompactionRecord.WRITE_LATENCY_BUCKETS_USEC); - int delayedMessageIndexSizeInBytes; + long delayedMessageIndexSizeInBytes; Map bucketDelayedIndexStats = new 
HashMap<>(); @@ -182,14 +182,34 @@ public void reset() { rateOut = 0; throughputIn = 0; throughputOut = 0; + messageAckRate = 0; + bytesInCounter = 0; + msgInCounter = 0; + + bytesOutCounter = 0; + msgOutCounter = 0; msgBacklog = 0; msgDelayed = 0; + ongoingTxnCount = 0; + abortedTxnCount = 0; + committedTxnCount = 0; + backlogQuotaLimit = 0; backlogQuotaLimitTime = -1; replicationStats.clear(); subscriptionStats.clear(); + + compactionRemovedEventCount = 0; + compactionSucceedCount = 0; + compactionFailedCount = 0; + compactionDurationTimeInMills = 0; + compactionReadThroughput = 0; + compactionWriteThroughput = 0; + compactionCompactedEntriesCount = 0; + compactionCompactedEntriesSize = 0; + delayedMessageIndexSizeInBytes = 0; bucketDelayedIndexStats.clear(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java index 8ade2bc883f9a..c2816f5a2a013 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/LongAdderCounter.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.stats.prometheus.metrics; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.stats.Counter; @@ -57,6 +58,7 @@ public void addCount(long delta) { @Override public void addLatency(long eventLatency, TimeUnit unit) { + checkArgument(eventLatency >= 0); long valueMillis = unit.toMillis(eventLatency); counter.add(valueMillis); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java index 3fd8921c15efa..b7cff2e08c2d0 
100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TopicPoliciesSystemTopicClient.java @@ -30,6 +30,8 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.TypedMessageBuilder; +import org.apache.pulsar.client.api.schema.SchemaDefinition; +import org.apache.pulsar.client.internal.DefaultImplementation; import org.apache.pulsar.common.events.ActionType; import org.apache.pulsar.common.events.PulsarEvent; import org.apache.pulsar.common.naming.TopicName; @@ -41,13 +43,17 @@ */ public class TopicPoliciesSystemTopicClient extends SystemTopicClientBase { + static Schema avroSchema = DefaultImplementation.getDefaultImplementation() + .newAvroSchema(SchemaDefinition.builder().withPojo(PulsarEvent.class).build()); + public TopicPoliciesSystemTopicClient(PulsarClient client, TopicName topicName) { super(client, topicName); + } @Override protected CompletableFuture> newWriterAsyncInternal() { - return client.newProducer(Schema.AVRO(PulsarEvent.class)) + return client.newProducer(avroSchema) .topic(topicName.toString()) .enableBatching(false) .createAsync() @@ -61,7 +67,7 @@ protected CompletableFuture> newWriterAsyncInternal() { @Override protected CompletableFuture> newReaderAsyncInternal() { - return client.newReader(Schema.AVRO(PulsarEvent.class)) + return client.newReader(avroSchema) .topic(topicName.toString()) .startMessageId(MessageId.earliest) .readCompacted(true) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java index bc2dd58a5812e..978536c5f4e36 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java @@ -212,9 +212,12 @@ public TransactionBufferReader newReader(long sequenceId) throws final ConcurrentMap buffers; final Map> txnIndex; + private final Topic topic; + public InMemTransactionBuffer(Topic topic) { this.buffers = new ConcurrentHashMap<>(); this.txnIndex = new HashMap<>(); + this.topic = topic; } @Override @@ -372,7 +375,7 @@ public void syncMaxReadPositionForNormalPublish(PositionImpl position) { @Override public PositionImpl getMaxReadPosition() { - return PositionImpl.LATEST; + return (PositionImpl) topic.getLastPosition(); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java index 3c13be220869f..a36216bd6258b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java @@ -37,6 +37,7 @@ import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.collections4.map.LinkedMap; import org.apache.pulsar.broker.service.BrokerServiceException; @@ -169,13 +170,15 @@ public void handleTxnEntry(Entry entry) { if (msgMetadata != null && msgMetadata.hasTxnidMostBits() && msgMetadata.hasTxnidLeastBits()) { TxnID txnID = new TxnID(msgMetadata.getTxnidMostBits(), msgMetadata.getTxnidLeastBits()); PositionImpl position = PositionImpl.get(entry.getLedgerId(), entry.getEntryId()); - if (Markers.isTxnMarker(msgMetadata)) { - if (Markers.isTxnAbortMarker(msgMetadata)) { - 
snapshotAbortedTxnProcessor.putAbortedTxnAndPosition(txnID, position); + synchronized (TopicTransactionBuffer.this) { + if (Markers.isTxnMarker(msgMetadata)) { + if (Markers.isTxnAbortMarker(msgMetadata)) { + snapshotAbortedTxnProcessor.putAbortedTxnAndPosition(txnID, position); + } + updateMaxReadPosition(txnID); + } else { + handleTransactionMessage(txnID, position); } - updateMaxReadPosition(txnID); - } else { - handleTransactionMessage(txnID, position); } } } @@ -286,8 +289,8 @@ private void handleTransactionMessage(TxnID txnId, Position position) { .checkAbortedTransaction(txnId)) { ongoingTxns.put(txnId, (PositionImpl) position); PositionImpl firstPosition = ongoingTxns.get(ongoingTxns.firstKey()); - //max read position is less than first ongoing transaction message position, so entryId -1 - maxReadPosition = PositionImpl.get(firstPosition.getLedgerId(), firstPosition.getEntryId() - 1); + // max read position is less than first ongoing transaction message position + maxReadPosition = ((ManagedLedgerImpl) topic.getManagedLedger()).getPreviousPosition(firstPosition); } } @@ -361,10 +364,10 @@ public void addComplete(Position position, ByteBuf entryData, Object ctx) { updateMaxReadPosition(txnID); snapshotAbortedTxnProcessor.trimExpiredAbortedTxns(); takeSnapshotByChangeTimes(); + txnAbortedCounter.increment(); + completableFuture.complete(null); + handleLowWaterMark(txnID, lowWaterMark); } - txnAbortedCounter.increment(); - completableFuture.complete(null); - handleLowWaterMark(txnID, lowWaterMark); } @Override @@ -446,8 +449,7 @@ void updateMaxReadPosition(TxnID txnID) { ongoingTxns.remove(txnID); if (!ongoingTxns.isEmpty()) { PositionImpl position = ongoingTxns.get(ongoingTxns.firstKey()); - //max read position is less than first ongoing transaction message position, so entryId -1 - maxReadPosition = PositionImpl.get(position.getLedgerId(), position.getEntryId() - 1); + maxReadPosition = ((ManagedLedgerImpl) 
topic.getManagedLedger()).getPreviousPosition(position); } else { maxReadPosition = (PositionImpl) topic.getManagedLedger().getLastConfirmedEntry(); } @@ -473,7 +475,7 @@ public CompletableFuture closeAsync() { } @Override - public boolean isTxnAborted(TxnID txnID, PositionImpl readPosition) { + public synchronized boolean isTxnAborted(TxnID txnID, PositionImpl readPosition) { return snapshotAbortedTxnProcessor.checkAbortedTransaction(txnID); } @@ -510,9 +512,11 @@ public PositionImpl getMaxReadPosition() { @Override public TransactionInBufferStats getTransactionInBufferStats(TxnID txnID) { TransactionInBufferStats transactionInBufferStats = new TransactionInBufferStats(); - transactionInBufferStats.aborted = isTxnAborted(txnID, null); - if (ongoingTxns.containsKey(txnID)) { - transactionInBufferStats.startPosition = ongoingTxns.get(txnID).toString(); + synchronized (this) { + transactionInBufferStats.aborted = isTxnAborted(txnID, null); + if (ongoingTxns.containsKey(txnID)) { + transactionInBufferStats.startPosition = ongoingTxns.get(txnID).toString(); + } } return transactionInBufferStats; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferDisable.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferDisable.java index 7c74b52951e28..9de0888ae5b0b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferDisable.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferDisable.java @@ -25,6 +25,7 @@ import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.BrokerServiceException; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.transaction.buffer.AbortedTxnProcessor; import org.apache.pulsar.broker.transaction.buffer.TransactionBuffer; import 
org.apache.pulsar.broker.transaction.buffer.TransactionBufferReader; @@ -40,6 +41,11 @@ @Slf4j public class TransactionBufferDisable implements TransactionBuffer { + private final Topic topic; + public TransactionBufferDisable(Topic topic) { + this.topic = topic; + } + @Override public CompletableFuture getTransactionMeta(TxnID txnID) { return CompletableFuture.completedFuture(null); @@ -91,7 +97,7 @@ public void syncMaxReadPositionForNormalPublish(PositionImpl position) { @Override public PositionImpl getMaxReadPosition() { - return PositionImpl.LATEST; + return (PositionImpl) topic.getLastPosition(); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java index 48dcf259edb1b..9aac9ab64d0fd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java @@ -61,6 +61,8 @@ public class TransactionBufferHandlerImpl implements TransactionBufferHandler { private final PulsarService pulsarService; private final PulsarClientImpl pulsarClient; + private final int randomKeyForSelectConnection; + private static final AtomicIntegerFieldUpdater REQUEST_CREDITS_UPDATER = AtomicIntegerFieldUpdater.newUpdater(TransactionBufferHandlerImpl.class, "requestCredits"); private volatile int requestCredits; @@ -74,6 +76,7 @@ public TransactionBufferHandlerImpl(PulsarService pulsarService, HashedWheelTime this.operationTimeoutInMills = operationTimeoutInMills; this.timer = timer; this.requestCredits = Math.max(100, maxConcurrentRequests); + this.randomKeyForSelectConnection = pulsarClient.getCnxPool().genRandomKeyToSelectCon(); } @Override @@ -134,8 +137,9 @@ public void endTxn(OpRequestSend op) { if 
(clientCnx.ctx().channel().isActive()) { clientCnx.registerTransactionBufferHandler(TransactionBufferHandlerImpl.this); outstandingRequests.put(op.requestId, op); + final long requestId = op.requestId; timer.newTimeout(timeout -> { - OpRequestSend peek = outstandingRequests.remove(op.requestId); + OpRequestSend peek = outstandingRequests.remove(requestId); if (peek != null && !peek.cb.isDone() && !peek.cb.isCompletedExceptionally()) { peek.cb.completeExceptionally(new TransactionBufferClientException .RequestTimeoutException()); @@ -296,7 +300,7 @@ protected OpRequestSend newObject(Handle handle) { } public CompletableFuture getClientCnxWithLookup(String topic) { - return pulsarClient.getConnection(topic); + return pulsarClient.getConnection(topic, randomKeyForSelectConnection); } public CompletableFuture getClientCnx(String topic) { @@ -317,7 +321,8 @@ public CompletableFuture getClientCnx(String topic) { } InetSocketAddress brokerAddress = InetSocketAddress.createUnresolved(uri.getHost(), uri.getPort()); - return pulsarClient.getConnection(brokerAddress, brokerAddress); + return pulsarClient.getConnection(brokerAddress, brokerAddress, + randomKeyForSelectConnection); } else { // Bundle is unloading, lookup topic return getClientCnxWithLookup(topic); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java index ecc6599ce52b5..5308648b80c1d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java @@ -159,7 +159,7 @@ public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { , originPersistentTopic.getName(), subscription.getName(), exception); 
pendingAckStoreFuture.completeExceptionally(exception); } - }, () -> true, null); + }, () -> CompletableFuture.completedFuture(true), null); }).exceptionally(e -> { Throwable t = FutureUtil.unwrapCompletionException(e); log.error("[{}] [{}] Failed to get managedLedger config when init pending ack store!", diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java index fa121b8eb4d60..cde555e24aaff 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java @@ -56,7 +56,9 @@ import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationParameters; import org.apache.pulsar.broker.authorization.AuthorizationService; +import org.apache.pulsar.broker.loadbalance.LoadManager; import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; +import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.namespace.LookupOptions; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.resources.BookieResources; @@ -92,6 +94,7 @@ import org.apache.pulsar.common.policies.path.PolicyPath; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.coordination.LockManager; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; import org.slf4j.Logger; @@ -530,6 +533,7 @@ protected static CompletableFuture getClusterDataIfDifferentCluster pulsar.getPulsarResources().getClusterResources().getClusterAsync(cluster) .whenComplete((clusterDataResult, ex) -> { if (ex != null) { + log.warn("[{}] Load cluster data failed: 
requested={}", clientAppId, cluster); clusterDataFuture.completeExceptionally(FutureUtil.unwrapCompletionException(ex)); return; } @@ -1195,24 +1199,43 @@ protected CompletableFuture canUpdateCluster(String tenant, Set ol /** * Redirect the call to the specified broker. * - * @param broker - * Broker name + * @param brokerId broker's id (lookup service address) */ - protected void validateBrokerName(String broker) { - String brokerUrl = String.format("http://%s", broker); - String brokerUrlTls = String.format("https://%s", broker); - if (!brokerUrl.equals(pulsar().getSafeWebServiceAddress()) - && !brokerUrlTls.equals(pulsar().getWebServiceAddressTls())) { - String[] parts = broker.split(":"); - checkArgument(parts.length == 2, String.format("Invalid broker url %s", broker)); - String host = parts[0]; - int port = Integer.parseInt(parts[1]); - - URI redirect = UriBuilder.fromUri(uri.getRequestUri()).host(host).port(port).build(); - log.debug("[{}] Redirecting the rest call to {}: broker={}", clientAppId(), redirect, broker); - throw new WebApplicationException(Response.temporaryRedirect(redirect).build()); - + protected CompletableFuture maybeRedirectToBroker(String brokerId) { + // backwards compatibility + String cleanedBrokerId = brokerId.replaceFirst("http[s]?://", ""); + if (pulsar.getBrokerId().equals(cleanedBrokerId) + // backwards compatibility + || ("http://" + cleanedBrokerId).equals(pulsar().getWebServiceAddress()) + || ("https://" + cleanedBrokerId).equals(pulsar().getWebServiceAddressTls())) { + // no need to redirect, the current broker matches the given broker id + return CompletableFuture.completedFuture(null); } + LockManager brokerLookupDataLockManager = + pulsar().getCoordinationService().getLockManager(BrokerLookupData.class); + return brokerLookupDataLockManager.readLock(LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + cleanedBrokerId) + .thenAccept(brokerLookupDataOptional -> { + if (brokerLookupDataOptional.isEmpty()) { + throw new 
RestException(Status.NOT_FOUND, + "Broker id '" + brokerId + "' not found in available brokers."); + } + brokerLookupDataOptional.ifPresent(brokerLookupData -> { + URI targetBrokerUri; + if ((isRequestHttps() || StringUtils.isBlank(brokerLookupData.getWebServiceUrl())) + && StringUtils.isNotBlank(brokerLookupData.getWebServiceUrlTls())) { + targetBrokerUri = URI.create(brokerLookupData.getWebServiceUrlTls()); + } else { + targetBrokerUri = URI.create(brokerLookupData.getWebServiceUrl()); + } + URI redirect = UriBuilder.fromUri(uri.getRequestUri()) + .scheme(targetBrokerUri.getScheme()) + .host(targetBrokerUri.getHost()) + .port(targetBrokerUri.getPort()).build(); + log.debug("[{}] Redirecting the rest call to {}: broker={}", clientAppId(), redirect, + cleanedBrokerId); + throw new WebApplicationException(Response.temporaryRedirect(redirect).build()); + }); + }); } public void validateTopicPolicyOperation(TopicName topicName, PolicyName policy, PolicyOperation operation) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java index 54d2ff867a629..008f45e63cb3c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchConverter.java @@ -44,10 +44,20 @@ public class RawBatchConverter { public static boolean isReadableBatch(RawMessage msg) { ByteBuf payload = msg.getHeadersAndPayload(); MessageMetadata metadata = Commands.parseMessageMetadata(payload); + return isReadableBatch(metadata); + } + + public static boolean isReadableBatch(MessageMetadata metadata) { return metadata.hasNumMessagesInBatch() && metadata.getEncryptionKeysCount() == 0; } public static List> extractIdsAndKeysAndSize(RawMessage msg) + throws IOException { + return extractIdsAndKeysAndSize(msg, true); + } + + public static List> extractIdsAndKeysAndSize(RawMessage msg, + 
boolean extractNullKey) throws IOException { checkArgument(msg.getMessageIdData().getBatchIndex() == -1); @@ -71,7 +81,7 @@ public static List> extractIdsAndKey msg.getMessageIdData().getEntryId(), msg.getMessageIdData().getPartition(), i); - if (!smm.isCompactedOut()) { + if (!smm.isCompactedOut() && (extractNullKey || smm.hasPartitionKey())) { idsAndKeysAndSize.add(ImmutableTriple.of(id, smm.hasPartitionKey() ? smm.getPartitionKey() : null, smm.hasPayloadSize() ? smm.getPayloadSize() : 0)); @@ -82,6 +92,11 @@ public static List> extractIdsAndKey return idsAndKeysAndSize; } + public static Optional rebatchMessage(RawMessage msg, + BiPredicate filter) throws IOException { + return rebatchMessage(msg, filter, true); + } + /** * Take a batched message and a filter, and returns a message with the only the sub-messages * which match the filter. Returns an empty optional if no messages match. @@ -89,7 +104,8 @@ public static List> extractIdsAndKey * NOTE: this message does not alter the reference count of the RawMessage argument. 
*/ public static Optional rebatchMessage(RawMessage msg, - BiPredicate filter) + BiPredicate filter, + boolean retainNullKey) throws IOException { checkArgument(msg.getMessageIdData().getBatchIndex() == -1); @@ -124,10 +140,19 @@ public static Optional rebatchMessage(RawMessage msg, msg.getMessageIdData().getEntryId(), msg.getMessageIdData().getPartition(), i); - if (!singleMessageMetadata.hasPartitionKey()) { - messagesRetained++; - Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata, - singleMessagePayload, batchBuffer); + if (singleMessageMetadata.isCompactedOut()) { + // we may read compacted out message from the compacted topic + Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata, + Unpooled.EMPTY_BUFFER, batchBuffer); + } else if (!singleMessageMetadata.hasPartitionKey()) { + if (retainNullKey) { + messagesRetained++; + Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata, + singleMessagePayload, batchBuffer); + } else { + Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata, + Unpooled.EMPTY_BUFFER, batchBuffer); + } } else if (filter.test(singleMessageMetadata.getPartitionKey(), id) && singleMessagePayload.readableBytes() > 0) { messagesRetained++; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java index 7e1c2cd5e3fe3..374f1e30c0a89 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImpl.java @@ -23,6 +23,7 @@ import java.util.Set; import org.apache.pulsar.client.api.CryptoKeyReader; import org.apache.pulsar.client.api.MessageCrypto; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.impl.crypto.MessageCryptoBc; import 
org.apache.pulsar.common.allocator.PulsarByteBufAllocator; @@ -44,17 +45,17 @@ * [(k1, v1), (k2, v1), (k3, v1), (k1, v2), (k2, v2), (k3, v2), (k1, v3), (k2, v3), (k3, v3)] */ public class RawBatchMessageContainerImpl extends BatchMessageContainerImpl { - MessageCrypto msgCrypto; - Set encryptionKeys; - CryptoKeyReader cryptoKeyReader; + private MessageCrypto msgCrypto; + private Set encryptionKeys; + private CryptoKeyReader cryptoKeyReader; + private MessageIdAdv lastAddedMessageId; - public RawBatchMessageContainerImpl(int maxNumMessagesInBatch, int maxBytesInBatch) { + public RawBatchMessageContainerImpl() { super(); this.compressionType = CompressionType.NONE; this.compressor = new CompressionCodecNone(); - this.maxNumMessagesInBatch = maxNumMessagesInBatch; - this.maxBytesInBatch = maxBytesInBatch; } + private ByteBuf encrypt(ByteBuf compressedPayload) { if (msgCrypto == null) { return compressedPayload; @@ -90,6 +91,28 @@ public void setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) { this.cryptoKeyReader = cryptoKeyReader; } + @Override + public boolean add(MessageImpl msg, SendCallback callback) { + this.lastAddedMessageId = (MessageIdAdv) msg.getMessageId(); + return super.add(msg, callback); + } + + @Override + protected boolean isBatchFull() { + return false; + } + + @Override + public boolean haveEnoughSpace(MessageImpl msg) { + if (lastAddedMessageId == null) { + return true; + } + // Keep same batch compact to same batch. + MessageIdAdv msgId = (MessageIdAdv) msg.getMessageId(); + return msgId.getLedgerId() == lastAddedMessageId.getLedgerId() + && msgId.getEntryId() == lastAddedMessageId.getEntryId(); + } + /** * Serializes the batched messages and return the ByteBuf. * It sets the CompressionType and Encryption Keys from the batched messages. 
@@ -164,8 +187,15 @@ public ByteBuf toByteBuf() { idData.writeTo(buf); buf.writeInt(metadataAndPayload.readableBytes()); buf.writeBytes(metadataAndPayload); + metadataAndPayload.release(); encryptedPayload.release(); clear(); return buf; } + + @Override + public void clear() { + this.lastAddedMessageId = null; + super.clear(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java index 9a1c972b2cc98..f65232413991f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java @@ -59,6 +59,7 @@ public RawReaderImpl(PulsarClientImpl client, String topic, String subscription, consumerConfiguration.setReceiverQueueSize(DEFAULT_RECEIVER_QUEUE_SIZE); consumerConfiguration.setReadCompacted(true); consumerConfiguration.setSubscriptionInitialPosition(SubscriptionInitialPosition.Earliest); + consumerConfiguration.setAckReceiptEnabled(true); consumer = new RawConsumerImpl(client, consumerConfiguration, consumerFuture); @@ -122,7 +123,7 @@ static class RawConsumerImpl extends ConsumerImpl { MessageId.earliest, 0 /* startMessageRollbackDurationInSec */, Schema.BYTES, null, - true + false ); incomingRawMessages = new GrowableArrayBlockingQueue<>(); pendingRawReceives = new ConcurrentLinkedQueue<>(); @@ -151,14 +152,13 @@ void tryCompletePending() { // TODO message validation numMsg = 1; } + MessageIdData messageId = messageAndCnx.msg.getMessageIdData(); + lastDequeuedMessageId = new BatchMessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), + messageId.getPartition(), numMsg - 1); if (!future.complete(messageAndCnx.msg)) { messageAndCnx.msg.close(); closeAsync(); } - MessageIdData messageId = messageAndCnx.msg.getMessageIdData(); - lastDequeuedMessageId = new BatchMessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), - 
messageId.getPartition(), numMsg - 1); - ClientCnx currentCnx = cnx(); if (currentCnx == messageAndCnx.cnx) { increaseAvailablePermits(currentCnx, numMsg); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java index 660c7ea779741..146ba4327d252 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java @@ -24,6 +24,7 @@ import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.Consumer; public interface CompactedTopic { @@ -34,11 +35,14 @@ public interface CompactedTopic { * Read entries from compacted topic. * * @deprecated Use {@link CompactedTopicUtils#asyncReadCompactedEntries(TopicCompactionService, ManagedCursor, - * int, long, boolean, ReadEntriesCallback, boolean, Consumer)} instead. + * int, long, org.apache.bookkeeper.mledger.impl.PositionImpl, boolean, ReadEntriesCallback, boolean, Consumer)} + * instead. 
*/ @Deprecated void asyncReadEntriesOrWait(ManagedCursor cursor, - int numberOfEntriesToRead, + int maxEntries, + long bytesToRead, + PositionImpl maxReadPosition, boolean isFirstRead, ReadEntriesCallback callback, Consumer consumer); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java index fd20c31d32fb8..0352808110190 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java @@ -32,6 +32,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.function.Predicate; import javax.annotation.Nullable; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; @@ -43,6 +44,7 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.EntryImpl; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.Consumer; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherSingleActiveConsumer.ReadEntriesCtx; @@ -53,6 +55,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * Note: If you want to guarantee that strong consistency between `compactionHorizon` and `compactedTopicContext`, + * you need to call getting them method in "synchronized(CompactedTopicImpl){ ... }" lock block. 
+ */ public class CompactedTopicImpl implements CompactedTopic { static final long NEWER_THAN_COMPACTED = -0xfeed0fbaL; static final long COMPACT_LEDGER_EMPTY = -0xfeed0fbbL; @@ -70,14 +76,14 @@ public CompactedTopicImpl(BookKeeper bk) { @Override public CompletableFuture newCompactedLedger(Position p, long compactedLedgerId) { synchronized (this) { - compactionHorizon = (PositionImpl) p; - CompletableFuture previousContext = compactedTopicContext; compactedTopicContext = openCompactedLedger(bk, compactedLedgerId); + compactionHorizon = (PositionImpl) p; + // delete the ledger from the old context once the new one is open - return compactedTopicContext.thenCompose(__ -> - previousContext != null ? previousContext : CompletableFuture.completedFuture(null)); + return compactedTopicContext.thenCompose( + __ -> previousContext != null ? previousContext : CompletableFuture.completedFuture(null)); } } @@ -89,11 +95,17 @@ public CompletableFuture deleteCompactedLedger(long compactedLedgerId) { @Override @Deprecated public void asyncReadEntriesOrWait(ManagedCursor cursor, - int numberOfEntriesToRead, + int maxEntries, + long bytesToRead, + PositionImpl maxReadPosition, boolean isFirstRead, ReadEntriesCallback callback, Consumer consumer) { PositionImpl cursorPosition; - if (isFirstRead && MessageId.earliest.equals(consumer.getStartMessageId())){ + boolean readFromEarliest = isFirstRead && MessageId.earliest.equals(consumer.getStartMessageId()) + && (!cursor.isDurable() || cursor.getName().equals(Compactor.COMPACTION_SUBSCRIPTION) + || cursor.getMarkDeletedPosition() == null + || cursor.getMarkDeletedPosition().getEntryId() == -1L); + if (readFromEarliest){ cursorPosition = PositionImpl.EARLIEST; } else { cursorPosition = (PositionImpl) cursor.getReadPosition(); @@ -106,8 +118,11 @@ public void asyncReadEntriesOrWait(ManagedCursor cursor, if (currentCompactionHorizon == null || currentCompactionHorizon.compareTo(cursorPosition) < 0) { - 
cursor.asyncReadEntriesOrWait(numberOfEntriesToRead, callback, readEntriesCtx, PositionImpl.LATEST); + cursor.asyncReadEntriesOrWait(maxEntries, bytesToRead, callback, readEntriesCtx, maxReadPosition); } else { + ManagedCursorImpl managedCursor = (ManagedCursorImpl) cursor; + int numberOfEntriesToRead = managedCursor.applyMaxSizeCap(maxEntries, bytesToRead); + compactedTopicContext.thenCompose( (context) -> findStartPoint(cursorPosition, context.ledger.getLastAddConfirmed(), context.cache) .thenCompose((startPoint) -> { @@ -119,9 +134,15 @@ public void asyncReadEntriesOrWait(ManagedCursor cursor, return CompletableFuture.completedFuture(null); } else { long endPoint = Math.min(context.ledger.getLastAddConfirmed(), - startPoint + numberOfEntriesToRead); + startPoint + (numberOfEntriesToRead - 1)); return readEntries(context.ledger, startPoint, endPoint) .thenAccept((entries) -> { + long entriesSize = 0; + for (Entry entry : entries) { + entriesSize += entry.getLength(); + } + managedCursor.updateReadStats(entries.size(), entriesSize); + Entry lastEntry = entries.get(entries.size() - 1); // The compaction task depends on the last snapshot and the incremental // entries to build the new snapshot. 
So for the compaction cursor, we @@ -304,6 +325,55 @@ public CompletableFuture readLastEntryOfCompactedLedger() { }); } + public CompletableFuture findFirstMatchEntry(final Predicate predicate) { + var compactedTopicContextFuture = this.getCompactedTopicContextFuture(); + + if (compactedTopicContextFuture == null) { + return CompletableFuture.completedFuture(null); + } + return compactedTopicContextFuture.thenCompose(compactedTopicContext -> { + LedgerHandle lh = compactedTopicContext.getLedger(); + CompletableFuture promise = new CompletableFuture<>(); + findFirstMatchIndexLoop(predicate, 0L, lh.getLastAddConfirmed(), promise, null, lh); + return promise.thenCompose(index -> { + if (index == null) { + return CompletableFuture.completedFuture(null); + } + return readEntries(lh, index, index).thenApply(entries -> entries.get(0)); + }); + }); + } + private static void findFirstMatchIndexLoop(final Predicate predicate, + final long start, final long end, + final CompletableFuture promise, + final Long lastMatchIndex, + final LedgerHandle lh) { + if (start > end) { + promise.complete(lastMatchIndex); + return; + } + + long mid = (start + end) / 2; + readEntries(lh, mid, mid).thenAccept(entries -> { + Entry entry = entries.get(0); + final boolean isMatch; + try { + isMatch = predicate.test(entry); + } finally { + entry.release(); + } + + if (isMatch) { + findFirstMatchIndexLoop(predicate, start, mid - 1, promise, mid, lh); + } else { + findFirstMatchIndexLoop(predicate, mid + 1, end, promise, lastMatchIndex, lh); + } + }).exceptionally(ex -> { + promise.completeExceptionally(ex); + return null; + }); + } + private static int comparePositionAndMessageId(PositionImpl p, MessageIdData m) { return ComparisonChain.start() .compare(p.getLedgerId(), m.getLedgerId()) @@ -314,6 +384,11 @@ public Optional getCompactionHorizon() { return Optional.ofNullable(this.compactionHorizon); } + public void reset() { + this.compactionHorizon = null; + this.compactedTopicContext = null; + } 
+ @Nullable public CompletableFuture getCompactedTopicContextFuture() { return compactedTopicContext; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicUtils.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicUtils.java index 6acd33279fd40..c084593b0fa37 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicUtils.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicUtils.java @@ -30,6 +30,7 @@ import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.collections4.CollectionUtils; import org.apache.pulsar.broker.service.Consumer; @@ -38,15 +39,24 @@ public class CompactedTopicUtils { - @Beta public static void asyncReadCompactedEntries(TopicCompactionService topicCompactionService, - ManagedCursor cursor, int numberOfEntriesToRead, + ManagedCursor cursor, int maxEntries, long bytesToRead, boolean readFromEarliest, AsyncCallbacks.ReadEntriesCallback callback, boolean wait, @Nullable Consumer consumer) { + asyncReadCompactedEntries(topicCompactionService, cursor, maxEntries, bytesToRead, PositionImpl.LATEST, + readFromEarliest, callback, wait, consumer); + } + + @Beta + public static void asyncReadCompactedEntries(TopicCompactionService topicCompactionService, + ManagedCursor cursor, int maxEntries, + long bytesToRead, PositionImpl maxReadPosition, + boolean readFromEarliest, AsyncCallbacks.ReadEntriesCallback callback, + boolean wait, @Nullable Consumer consumer) { Objects.requireNonNull(topicCompactionService); Objects.requireNonNull(cursor); - checkArgument(numberOfEntriesToRead > 0); + checkArgument(maxEntries > 0); Objects.requireNonNull(callback); final PositionImpl readPosition; @@ -67,15 +77,16 @@ 
public static void asyncReadCompactedEntries(TopicCompactionService topicCompact || readPosition.compareTo( lastCompactedPosition.getLedgerId(), lastCompactedPosition.getEntryId()) > 0) { if (wait) { - cursor.asyncReadEntriesOrWait(numberOfEntriesToRead, bytesToRead, callback, readEntriesCtx, - PositionImpl.LATEST); + cursor.asyncReadEntriesOrWait(maxEntries, bytesToRead, callback, readEntriesCtx, maxReadPosition); } else { - cursor.asyncReadEntries(numberOfEntriesToRead, bytesToRead, callback, readEntriesCtx, - PositionImpl.LATEST); + cursor.asyncReadEntries(maxEntries, bytesToRead, callback, readEntriesCtx, maxReadPosition); } return CompletableFuture.completedFuture(null); } + ManagedCursorImpl managedCursor = (ManagedCursorImpl) cursor; + int numberOfEntriesToRead = managedCursor.applyMaxSizeCap(maxEntries, bytesToRead); + return topicCompactionService.readCompactedEntries(readPosition, numberOfEntriesToRead) .thenAccept(entries -> { if (CollectionUtils.isEmpty(entries)) { @@ -88,6 +99,12 @@ public static void asyncReadCompactedEntries(TopicCompactionService topicCompact return; } + long entriesSize = 0; + for (Entry entry : entries) { + entriesSize += entry.getLength(); + } + managedCursor.updateReadStats(entries.size(), entriesSize); + Entry lastEntry = entries.get(entries.size() - 1); cursor.seek(lastEntry.getPosition().getNext(), true); callback.readEntriesComplete(entries, readEntriesCtx); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/PulsarTopicCompactionService.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/PulsarTopicCompactionService.java index dd218c9be51a2..c420767d1e884 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/PulsarTopicCompactionService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/PulsarTopicCompactionService.java @@ -78,7 +78,7 @@ public CompletableFuture> readCompactedEntries(@Nonnull Position sta return 
CompletableFuture.completedFuture(Collections.emptyList()); } long endPoint = - Math.min(context.ledger.getLastAddConfirmed(), startPoint + numberOfEntriesToRead); + Math.min(context.ledger.getLastAddConfirmed(), startPoint + (numberOfEntriesToRead - 1)); return CompactedTopicImpl.readEntries(context.ledger, startPoint, endPoint); })).whenComplete((result, ex) -> { if (ex == null) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java index a6b0942742763..fefa2ee959cc5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/StrategicTwoPhaseCompactor.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.compaction; -import com.google.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; import java.time.Duration; import java.util.Iterator; @@ -63,39 +62,19 @@ public class StrategicTwoPhaseCompactor extends TwoPhaseCompactor { private static final Logger log = LoggerFactory.getLogger(StrategicTwoPhaseCompactor.class); private static final int MAX_OUTSTANDING = 500; - private static final int MAX_NUM_MESSAGES_IN_BATCH = 1000; - private static final int MAX_BYTES_IN_BATCH = 128 * 1024; private static final int MAX_READER_RECONNECT_WAITING_TIME_IN_MILLIS = 20 * 1000; private final Duration phaseOneLoopReadTimeout; private final RawBatchMessageContainerImpl batchMessageContainer; - @VisibleForTesting public StrategicTwoPhaseCompactor(ServiceConfiguration conf, PulsarClient pulsar, BookKeeper bk, - ScheduledExecutorService scheduler, - int maxNumMessagesInBatch) { - this(conf, pulsar, bk, scheduler, maxNumMessagesInBatch, MAX_BYTES_IN_BATCH); - } - - private StrategicTwoPhaseCompactor(ServiceConfiguration conf, - PulsarClient pulsar, - BookKeeper bk, - ScheduledExecutorService scheduler, - int 
maxNumMessagesInBatch, - int maxBytesInBatch) { + ScheduledExecutorService scheduler) { super(conf, pulsar, bk, scheduler); - batchMessageContainer = new RawBatchMessageContainerImpl(maxNumMessagesInBatch, maxBytesInBatch); + batchMessageContainer = new RawBatchMessageContainerImpl(); phaseOneLoopReadTimeout = Duration.ofSeconds(conf.getBrokerServiceCompactionPhaseOneLoopTimeInSeconds()); } - public StrategicTwoPhaseCompactor(ServiceConfiguration conf, - PulsarClient pulsar, - BookKeeper bk, - ScheduledExecutorService scheduler) { - this(conf, pulsar, bk, scheduler, MAX_NUM_MESSAGES_IN_BATCH, MAX_BYTES_IN_BATCH); - } - public CompletableFuture compact(String topic) { throw new UnsupportedOperationException(); } @@ -418,7 +397,6 @@ private void phaseTwoLoop(String topic, Iterator> reader, .whenComplete((res, exception2) -> { if (exception2 != null) { promise.completeExceptionally(exception2); - return; } }); phaseTwoLoop(topic, reader, lh, outstanding, promise); @@ -443,35 +421,45 @@ private void phaseTwoLoop(String topic, Iterator> reader, CompletableFuture addToCompactedLedger( LedgerHandle lh, Message m, String topic, Semaphore outstanding) { + if (m == null) { + return flushBatchMessage(lh, topic, outstanding); + } + if (batchMessageContainer.haveEnoughSpace((MessageImpl) m)) { + batchMessageContainer.add((MessageImpl) m, null); + return CompletableFuture.completedFuture(false); + } + CompletableFuture f = flushBatchMessage(lh, topic, outstanding); + batchMessageContainer.add((MessageImpl) m, null); + return f; + } + + private CompletableFuture flushBatchMessage(LedgerHandle lh, String topic, + Semaphore outstanding) { + if (batchMessageContainer.getNumMessagesInBatch() <= 0) { + return CompletableFuture.completedFuture(false); + } CompletableFuture bkf = new CompletableFuture<>(); - if (m == null || batchMessageContainer.add((MessageImpl) m, null)) { - if (batchMessageContainer.getNumMessagesInBatch() > 0) { - try { - ByteBuf serialized = 
batchMessageContainer.toByteBuf(); - outstanding.acquire(); - mxBean.addCompactionWriteOp(topic, serialized.readableBytes()); - long start = System.nanoTime(); - lh.asyncAddEntry(serialized, - (rc, ledger, eid, ctx) -> { - outstanding.release(); - mxBean.addCompactionLatencyOp(topic, System.nanoTime() - start, TimeUnit.NANOSECONDS); - if (rc != BKException.Code.OK) { - bkf.completeExceptionally(BKException.create(rc)); - } else { - bkf.complete(true); - } - }, null); + try { + ByteBuf serialized = batchMessageContainer.toByteBuf(); + outstanding.acquire(); + mxBean.addCompactionWriteOp(topic, serialized.readableBytes()); + long start = System.nanoTime(); + lh.asyncAddEntry(serialized, + (rc, ledger, eid, ctx) -> { + outstanding.release(); + mxBean.addCompactionLatencyOp(topic, System.nanoTime() - start, TimeUnit.NANOSECONDS); + if (rc != BKException.Code.OK) { + bkf.completeExceptionally(BKException.create(rc)); + } else { + bkf.complete(true); + } + }, null); - } catch (Throwable t) { - log.error("Failed to add entry", t); - batchMessageContainer.discard((Exception) t); - return FutureUtil.failedFuture(t); - } - } else { - bkf.complete(false); - } - } else { - bkf.complete(false); + } catch (Throwable t) { + log.error("Failed to add entry", t); + batchMessageContainer.discard((Exception) t); + bkf.completeExceptionally(t); + return bkf; } return bkf; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java index 7d3b5863cb6f6..647c34a94ad81 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/TwoPhaseCompactor.java @@ -45,6 +45,7 @@ import org.apache.pulsar.client.impl.RawBatchConverter; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.Commands; +import org.apache.pulsar.common.protocol.Markers; 
import org.apache.pulsar.common.util.FutureUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,6 +63,7 @@ public class TwoPhaseCompactor extends Compactor { private static final Logger log = LoggerFactory.getLogger(TwoPhaseCompactor.class); private static final int MAX_OUTSTANDING = 500; private final Duration phaseOneLoopReadTimeout; + private final boolean topicCompactionRetainNullKey; public TwoPhaseCompactor(ServiceConfiguration conf, PulsarClient pulsar, @@ -69,6 +71,7 @@ public TwoPhaseCompactor(ServiceConfiguration conf, ScheduledExecutorService scheduler) { super(conf, pulsar, bk, scheduler); phaseOneLoopReadTimeout = Duration.ofSeconds(conf.getBrokerServiceCompactionPhaseOneLoopTimeInSeconds()); + topicCompactionRetainNullKey = conf.isTopicCompactionRetainNullKey(); } @Override @@ -122,26 +125,43 @@ private void phaseOneLoop(RawReader reader, () -> FutureUtil.createTimeoutException("Timeout", getClass(), "phaseOneLoop(...)")); future.thenAcceptAsync(m -> { - try { + try (m) { MessageId id = m.getMessageId(); boolean deletedMessage = false; boolean replaceMessage = false; mxBean.addCompactionReadOp(reader.getTopic(), m.getHeadersAndPayload().readableBytes()); - if (RawBatchConverter.isReadableBatch(m)) { + MessageMetadata metadata = Commands.parseMessageMetadata(m.getHeadersAndPayload()); + if (Markers.isServerOnlyMarker(metadata)) { + mxBean.addCompactionRemovedEvent(reader.getTopic()); + deletedMessage = true; + } else if (RawBatchConverter.isReadableBatch(metadata)) { try { + int numMessagesInBatch = metadata.getNumMessagesInBatch(); + int deleteCnt = 0; for (ImmutableTriple e : extractIdsAndKeysAndSizeFromBatch(m)) { if (e != null) { + if (e.getMiddle() == null) { + if (!topicCompactionRetainNullKey) { + // record delete null-key message event + deleteCnt++; + mxBean.addCompactionRemovedEvent(reader.getTopic()); + } + continue; + } if (e.getRight() > 0) { MessageId old = latestForKey.put(e.getMiddle(), e.getLeft()); - replaceMessage = 
old != null; + if (old != null) { + mxBean.addCompactionRemovedEvent(reader.getTopic()); + } } else { - deletedMessage = true; latestForKey.remove(e.getMiddle()); + deleteCnt++; + mxBean.addCompactionRemovedEvent(reader.getTopic()); } } - if (replaceMessage || deletedMessage) { - mxBean.addCompactionRemovedEvent(reader.getTopic()); - } + } + if (deleteCnt == numMessagesInBatch) { + deletedMessage = true; } } catch (IOException ioe) { log.info("Error decoding batch for message {}. Whole batch will be included in output", @@ -157,6 +177,10 @@ private void phaseOneLoop(RawReader reader, deletedMessage = true; latestForKey.remove(keyAndSize.getLeft()); } + } else { + if (!topicCompactionRetainNullKey) { + deletedMessage = true; + } } if (replaceMessage || deletedMessage) { mxBean.addCompactionRemovedEvent(reader.getTopic()); @@ -174,8 +198,6 @@ private void phaseOneLoop(RawReader reader, lastMessageId, latestForKey, loopPromise); } - } finally { - m.close(); } }, scheduler).exceptionally(ex -> { loopPromise.completeExceptionally(ex); @@ -201,7 +223,7 @@ private CompletableFuture phaseTwoSeekThenLoop(RawReader reader, MessageId reader.seekAsync(from).thenCompose((v) -> { Semaphore outstanding = new Semaphore(MAX_OUTSTANDING); CompletableFuture loopPromise = new CompletableFuture<>(); - phaseTwoLoop(reader, to, latestForKey, ledger, outstanding, loopPromise); + phaseTwoLoop(reader, to, latestForKey, ledger, outstanding, loopPromise, MessageId.earliest); return loopPromise; }).thenCompose((v) -> closeLedger(ledger)) .thenCompose((v) -> reader.acknowledgeCumulativeAsync(lastReadId, @@ -223,7 +245,8 @@ private CompletableFuture phaseTwoSeekThenLoop(RawReader reader, MessageId } private void phaseTwoLoop(RawReader reader, MessageId to, Map latestForKey, - LedgerHandle lh, Semaphore outstanding, CompletableFuture promise) { + LedgerHandle lh, Semaphore outstanding, CompletableFuture promise, + MessageId lastCompactedMessageId) { if (promise.isDone()) { return; } @@ -232,14 
+255,24 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map m.close(); return; } + + if (m.getMessageId().compareTo(lastCompactedMessageId) <= 0) { + m.close(); + phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise, lastCompactedMessageId); + return; + } + try { MessageId id = m.getMessageId(); Optional messageToAdd = Optional.empty(); mxBean.addCompactionReadOp(reader.getTopic(), m.getHeadersAndPayload().readableBytes()); - if (RawBatchConverter.isReadableBatch(m)) { + MessageMetadata metadata = Commands.parseMessageMetadata(m.getHeadersAndPayload()); + if (Markers.isServerOnlyMarker(metadata)) { + messageToAdd = Optional.empty(); + } else if (RawBatchConverter.isReadableBatch(metadata)) { try { - messageToAdd = rebatchMessage( - m, (key, subid) -> subid.equals(latestForKey.get(key))); + messageToAdd = rebatchMessage(reader.getTopic(), + m, (key, subid) -> subid.equals(latestForKey.get(key)), topicCompactionRetainNullKey); } catch (IOException ioe) { log.info("Error decoding batch for message {}. Whole batch will be included in output", id, ioe); @@ -248,8 +281,8 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map } else { Pair keyAndSize = extractKeyAndSize(m); MessageId msg; - if (keyAndSize == null) { // pass through messages without a key - messageToAdd = Optional.of(m); + if (keyAndSize == null) { + messageToAdd = topicCompactionRetainNullKey ? 
Optional.of(m) : Optional.empty(); } else if ((msg = latestForKey.get(keyAndSize.getLeft())) != null && msg.equals(id)) { // consider message only if present into latestForKey map if (keyAndSize.getRight() <= 0) { @@ -272,11 +305,14 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map } }); if (to.equals(id)) { + // make sure all inflight writes have finished + outstanding.acquire(MAX_OUTSTANDING); addFuture.whenComplete((res, exception2) -> { if (exception2 == null) { promise.complete(null); } }); + return; } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -299,7 +335,7 @@ private void phaseTwoLoop(RawReader reader, MessageId to, Map } return; } - phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise); + phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise, m.getMessageId()); } finally { m.close(); } @@ -405,9 +441,13 @@ protected List> extractIdsAndKeysAnd return RawBatchConverter.extractIdsAndKeysAndSize(msg); } - protected Optional rebatchMessage(RawMessage msg, BiPredicate filter) + protected Optional rebatchMessage(String topic, RawMessage msg, BiPredicate filter, + boolean retainNullKey) throws IOException { - return RawBatchConverter.rebatchMessage(msg, filter); + if (log.isDebugEnabled()) { + log.debug("Rebatching message {} for topic {}", msg.getMessageId(), topic); + } + return RawBatchConverter.rebatchMessage(msg, filter, retainNullKey); } private static class PhaseOneResult { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java b/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java index d73d1d7ed6bed..63d146a3a1521 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsemble.java @@ -52,7 +52,6 @@ import org.apache.bookkeeper.clients.exceptions.NamespaceExistsException; import 
org.apache.bookkeeper.clients.exceptions.NamespaceNotFoundException; import org.apache.bookkeeper.common.allocator.PoolingPolicy; -import org.apache.bookkeeper.common.component.ComponentStarter; import org.apache.bookkeeper.common.component.LifecycleComponent; import org.apache.bookkeeper.common.component.LifecycleComponentStack; import org.apache.bookkeeper.common.concurrent.FutureUtils; @@ -132,7 +131,7 @@ public LocalBookkeeperEnsemble(int numberOfBookies, boolean clearOldData, String advertisedAddress) { this(numberOfBookies, zkPort, streamStoragePort, zkDataDirName, bkDataDirName, clearOldData, advertisedAddress, - new BasePortManager(bkBasePort)); + bkBasePort != 0 ? new BasePortManager(bkBasePort) : () -> 0); } public LocalBookkeeperEnsemble(int numberOfBookies, @@ -311,6 +310,7 @@ private void runBookies(ServerConfiguration baseConf) throws Exception { bsConfs[i] = new ServerConfiguration(baseConf); // override settings bsConfs[i].setBookiePort(bookiePort); + bsConfs[i].setBookieId("bk" + i + "test"); String zkServers = "127.0.0.1:" + zkPort; String metadataServiceUriStr = "zk://" + zkServers + "/ledgers"; @@ -455,8 +455,10 @@ public void startBK(int i) throws Exception { try { bookieComponents[i] = org.apache.bookkeeper.server.Main .buildBookieServer(new BookieConfiguration(bsConfs[i])); - ComponentStarter.startComponent(bookieComponents[i]); + bookieComponents[i].start(); } catch (BookieException.InvalidCookieException ice) { + LOG.warn("Invalid cookie found for bookie {}", i, ice); + // InvalidCookieException can happen if the machine IP has changed // Since we are running here a local bookie that is always accessed // from localhost, we can ignore the error @@ -473,7 +475,7 @@ public void startBK(int i) throws Exception { bookieComponents[i] = org.apache.bookkeeper.server.Main .buildBookieServer(new BookieConfiguration(bsConfs[i])); - ComponentStarter.startComponent(bookieComponents[i]); + bookieComponents[i].start(); } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java index 0dea84e727a88..3c0e4d0c409df 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BookKeeperClientFactoryImplTest.java @@ -41,6 +41,7 @@ import org.apache.pulsar.bookie.rackawareness.BookieRackAffinityMapping; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.zookeeper.ZkIsolatedBookieEnsemblePlacementPolicy; import org.testng.annotations.Test; /** @@ -152,6 +153,24 @@ public void testSetDefaultEnsemblePlacementPolicyRackAwareEnabledChangedValues() assertEquals(20, bkConf.getMinNumRacksPerWriteQuorum()); } + @Test + public void testSetEnsemblePlacementPolicys() { + ClientConfiguration bkConf = new ClientConfiguration(); + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setBookkeeperClientMinNumRacksPerWriteQuorum(3); + conf.setBookkeeperClientEnforceMinNumRacksPerWriteQuorum(true); + + MetadataStore store = mock(MetadataStore.class); + + BookKeeperClientFactoryImpl.setEnsemblePlacementPolicy( + bkConf, + conf, + store, + ZkIsolatedBookieEnsemblePlacementPolicy.class); + assertEquals(bkConf.getMinNumRacksPerWriteQuorum(), 3); + assertTrue(bkConf.getEnforceMinNumRacksPerWriteQuorum()); + } + @Test public void testSetDiskWeightBasedPlacementEnabled() { BookKeeperClientFactoryImpl factory = new BookKeeperClientFactoryImpl(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java index 37a7310ae17ca..3e0887646e119 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/PulsarServiceTest.java @@ -54,6 +54,8 @@ protected void cleanup() throws Exception { @Override protected void doInitConf() throws Exception { super.doInitConf(); + conf.setBrokerServicePortTls(Optional.of(0)); + conf.setWebServicePortTls(Optional.of(0)); if (useStaticPorts) { conf.setBrokerServicePortTls(Optional.of(6651)); conf.setBrokerServicePort(Optional.of(6660)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java index 47949d7312b88..4a6524bf24521 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/SLAMonitoringTest.java @@ -102,9 +102,9 @@ void setup() throws Exception { createTenant(pulsarAdmins[BROKER_COUNT - 1]); for (int i = 0; i < BROKER_COUNT; i++) { - String topic = String.format("%s/%s/%s:%s", NamespaceService.SLA_NAMESPACE_PROPERTY, "my-cluster", - pulsarServices[i].getAdvertisedAddress(), brokerWebServicePorts[i]); - pulsarAdmins[0].namespaces().createNamespace(topic); + var namespaceName = NamespaceService.getSLAMonitorNamespace(pulsarServices[i].getBrokerId(), + pulsarServices[i].getConfig()); + pulsarAdmins[0].namespaces().createNamespace(namespaceName.toString()); } } @@ -173,9 +173,9 @@ public void testOwnedNamespaces() { public void testOwnershipViaAdminAfterSetup() { for (int i = 0; i < BROKER_COUNT; i++) { try { - String topic = String.format("persistent://%s/%s/%s:%s/%s", - NamespaceService.SLA_NAMESPACE_PROPERTY, "my-cluster", pulsarServices[i].getAdvertisedAddress(), - brokerWebServicePorts[i], "my-topic"); + String topic = String.format("persistent://%s/%s/%s/%s", + NamespaceService.SLA_NAMESPACE_PROPERTY, "my-cluster", + pulsarServices[i].getBrokerId(), "my-topic"); assertEquals(pulsarAdmins[0].lookups().lookupTopic(topic), "pulsar://" + 
pulsarServices[i].getAdvertisedAddress() + ":" + brokerNativeBrokerPorts[i]); } catch (Exception e) { @@ -199,8 +199,8 @@ public void testUnloadIfBrokerCrashes() { fail("Should be a able to close the broker index " + crashIndex + " Exception: " + e); } - String topic = String.format("persistent://%s/%s/%s:%s/%s", NamespaceService.SLA_NAMESPACE_PROPERTY, - "my-cluster", pulsarServices[crashIndex].getAdvertisedAddress(), brokerWebServicePorts[crashIndex], + String topic = String.format("persistent://%s/%s/%s/%s", NamespaceService.SLA_NAMESPACE_PROPERTY, + "my-cluster", pulsarServices[crashIndex].getBrokerId(), "my-topic"); log.info("Lookup for namespace {}", topic); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java index e6459bbf74c31..ceb3c1d0d9335 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/TopicEventsListenerTest.java @@ -126,7 +126,7 @@ public void testEvents(String topicTypePersistence, String topicTypePartitioned, boolean forceDelete) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); events.clear(); if (topicTypePartitioned.equals("partitioned")) { @@ -150,7 +150,7 @@ public void testEventsWithUnload(String topicTypePersistence, String topicTypePa boolean forceDelete) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); events.clear(); admin.topics().unload(topicName); @@ -182,7 +182,7 @@ public void 
testEventsActiveSub(String topicTypePersistence, String topicTypePar boolean forceDelete) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("sub").subscribe(); Producer producer = pulsarClient.newProducer().topic(topicName).create(); @@ -238,7 +238,7 @@ public void testEventsActiveSub(String topicTypePersistence, String topicTypePar public void testTopicAutoGC(String topicTypePersistence, String topicTypePartitioned) throws Exception { String topicName = topicTypePersistence + "://" + namespace + "/" + "topic-" + UUID.randomUUID(); - createTopicAndVerifyEvents(topicTypePartitioned, topicName); + createTopicAndVerifyEvents(topicTypePersistence, topicTypePartitioned, topicName); admin.namespaces().setInactiveTopicPolicies(namespace, new InactiveTopicPolicies(InactiveTopicDeleteMode.delete_when_no_subscriptions, 1, true)); @@ -262,25 +262,21 @@ public void testTopicAutoGC(String topicTypePersistence, String topicTypePartiti ); } - private void createTopicAndVerifyEvents(String topicTypePartitioned, String topicName) throws Exception { + private void createTopicAndVerifyEvents(String topicDomain, String topicTypePartitioned, String topicName) throws Exception { final String[] expectedEvents; - if (topicTypePartitioned.equals("partitioned")) { - topicNameToWatch = topicName + "-partition-1"; - admin.topics().createPartitionedTopic(topicName, 2); - triggerPartitionsCreation(topicName); - + if (topicDomain.equalsIgnoreCase("persistent") || topicTypePartitioned.equals("partitioned")) { expectedEvents = new String[]{ "LOAD__BEFORE", "CREATE__BEFORE", "CREATE__SUCCESS", "LOAD__SUCCESS" }; - } else { - topicNameToWatch = topicName; - admin.topics().createNonPartitionedTopic(topicName); 
- expectedEvents = new String[]{ + // Before https://github.com/apache/pulsar/pull/21995, Pulsar would skip creating the topic if the topic + // already existed, and the action "check topic exists" would try to load the managed ledger, + // so the check triggered two extra events: [LOAD__BEFORE, LOAD__FAILURE]. + // #21995 fixed this wrong behavior, so remove these two events. "LOAD__BEFORE", "LOAD__FAILURE", "LOAD__BEFORE", @@ -288,7 +284,14 @@ private void createTopicAndVerifyEvents(String topicTypePartitioned, String topi "CREATE__SUCCESS", "LOAD__SUCCESS" }; - + } + if (topicTypePartitioned.equals("partitioned")) { + topicNameToWatch = topicName + "-partition-1"; + admin.topics().createPartitionedTopic(topicName, 2); + triggerPartitionsCreation(topicName); + } else { + topicNameToWatch = topicName; + admin.topics().createNonPartitionedTopic(topicName); } Awaitility.waitAtMost(10, TimeUnit.SECONDS).untilAsserted(() -> diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java index d6176966d85d9..7cf84673be86a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java @@ -61,11 +61,13 @@ import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.admin.AdminApiTest.MockedPulsarService; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl; +import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerWrapper; import 
org.apache.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; @@ -437,19 +439,13 @@ public void testTopicPoliciesWithMultiBroker() throws Exception { String tenantName = newUniqueName("prop-xyz2"); admin.tenants().createTenant(tenantName, tenantInfo); admin.namespaces().createNamespace(tenantName + "/ns1", Set.of("test")); - conf.setBrokerServicePort(Optional.of(1024)); - conf.setBrokerServicePortTls(Optional.of(1025)); - conf.setWebServicePort(Optional.of(1026)); - conf.setWebServicePortTls(Optional.of(1027)); + ServiceConfiguration config2 = super.getDefaultConf(); @Cleanup - PulsarTestContext pulsarTestContext2 = createAdditionalPulsarTestContext(conf); + PulsarTestContext pulsarTestContext2 = createAdditionalPulsarTestContext(config2); PulsarService pulsar2 = pulsarTestContext2.getPulsarService(); - conf.setBrokerServicePort(Optional.of(2048)); - conf.setBrokerServicePortTls(Optional.of(2049)); - conf.setWebServicePort(Optional.of(2050)); - conf.setWebServicePortTls(Optional.of(2051)); + ServiceConfiguration config3 = super.getDefaultConf(); @Cleanup - PulsarTestContext pulsarTestContext3 = createAdditionalPulsarTestContext(conf); + PulsarTestContext pulsarTestContext3 = createAdditionalPulsarTestContext(config3); PulsarService pulsar3 = pulsarTestContext.getPulsarService(); @Cleanup PulsarAdmin admin2 = PulsarAdmin.builder().serviceHttpUrl(pulsar2.getWebServiceAddress()).build(); @@ -519,8 +515,7 @@ public void nonPersistentTopics() throws Exception { assertEquals(topicStats.getSubscriptions().get("my-sub").getMsgDropRate(), 0); assertEquals(topicStats.getPublishers().size(), 0); assertEquals(topicStats.getMsgDropRate(), 0); - assertEquals(topicStats.getOwnerBroker(), - pulsar.getAdvertisedAddress() + ":" + pulsar.getConfiguration().getWebServicePort().get()); + assertEquals(topicStats.getOwnerBroker(), pulsar.getBrokerId()); 
PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(nonPersistentTopicName, false); assertEquals(internalStats.cursors.keySet(), Set.of("my-sub")); @@ -1313,7 +1308,7 @@ public void brokerNamespaceIsolationPolicies() throws Exception { String cluster = pulsar.getConfiguration().getClusterName(); String namespaceRegex = "other/" + cluster + "/other.*"; String brokerName = pulsar.getAdvertisedAddress(); - String brokerAddress = brokerName + ":" + pulsar.getConfiguration().getWebServicePort().get(); + String brokerAddress = pulsar.getBrokerId(); Map parameters1 = new HashMap<>(); parameters1.put("min_limit", "1"); @@ -1321,7 +1316,7 @@ public void brokerNamespaceIsolationPolicies() throws Exception { NamespaceIsolationData nsPolicyData1 = NamespaceIsolationData.builder() .namespaces(Collections.singletonList(namespaceRegex)) - .primary(Collections.singletonList(brokerName + ":[0-9]*")) + .primary(Collections.singletonList(brokerName)) .secondary(Collections.singletonList(brokerName + ".*")) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) @@ -1438,19 +1433,10 @@ public void testClusterIsReadyBeforeCreateTopic() throws Exception { admin.namespaces().createNamespace(defaultTenant + "/ns2"); // By default the cluster will configure as configuration file. So the create topic operation // will never throw exception except there is no cluster. 
- admin.namespaces().setNamespaceReplicationClusters(defaultTenant + "/ns2", new HashSet()); + admin.namespaces().setNamespaceReplicationClusters(defaultTenant + "/ns2", Sets.newHashSet(configClusterName)); - try { - admin.topics().createPartitionedTopic(persistentPartitionedTopicName, partitions); - Assert.fail("should have failed due to Namespace does not have any clusters configured"); - } catch (PulsarAdminException.PreconditionFailedException ignored) { - } - - try { - admin.topics().createPartitionedTopic(NonPersistentPartitionedTopicName, partitions); - Assert.fail("should have failed due to Namespace does not have any clusters configured"); - } catch (PulsarAdminException.PreconditionFailedException ignored) { - } + admin.topics().createPartitionedTopic(persistentPartitionedTopicName, partitions); + admin.topics().createPartitionedTopic(NonPersistentPartitionedTopicName, partitions); } @Test @@ -1719,6 +1705,8 @@ public void testDeleteNamespace(NamespaceAttr namespaceAttr) throws Exception { // Set conf. cleanup(); setNamespaceAttr(namespaceAttr); + this.conf.setMetadataStoreUrl("127.0.0.1:2181"); + this.conf.setConfigurationMetadataStoreUrl("127.0.0.1:2182"); setup(); String tenant = newUniqueName("test-tenant"); @@ -1739,6 +1727,28 @@ public void testDeleteNamespace(NamespaceAttr namespaceAttr) throws Exception { admin.topics().createPartitionedTopic(topic, 10); assertFalse(admin.topics().getList(namespace).isEmpty()); + final String managedLedgersPath = "/managed-ledgers/" + namespace; + final String bundleDataPath = "/loadbalance/bundle-data/" + namespace; + // Trigger bundle owned by brokers. + pulsarClient.newProducer().topic(topic).create().close(); + // Trigger bundle data write to ZK. 
+ Awaitility.await().untilAsserted(() -> { + boolean bundleDataWereWriten = false; + for (PulsarService ps : new PulsarService[]{pulsar, mockPulsarSetup.getPulsar()}) { + ModularLoadManagerWrapper loadManager = (ModularLoadManagerWrapper) ps.getLoadManager().get(); + ModularLoadManagerImpl loadManagerImpl = (ModularLoadManagerImpl) loadManager.getLoadManager(); + ps.getBrokerService().updateRates(); + loadManagerImpl.updateLocalBrokerData(); + loadManagerImpl.writeBundleDataOnZooKeeper(); + bundleDataWereWriten = bundleDataWereWriten || ps.getLocalMetadataStore().exists(bundleDataPath).join(); + } + assertTrue(bundleDataWereWriten); + }); + + // assert znode exists in metadata store + assertTrue(pulsar.getLocalMetadataStore().exists(bundleDataPath).join()); + assertTrue(pulsar.getLocalMetadataStore().exists(managedLedgersPath).join()); + try { admin.namespaces().deleteNamespace(namespace, false); fail("should have failed due to namespace not empty"); @@ -1755,12 +1765,8 @@ public void testDeleteNamespace(NamespaceAttr namespaceAttr) throws Exception { assertFalse(admin.namespaces().getNamespaces(tenant).contains(namespace)); assertTrue(admin.namespaces().getNamespaces(tenant).isEmpty()); - - final String managedLedgersPath = "/managed-ledgers/" + namespace; + // assert znode deleted in metadata store assertFalse(pulsar.getLocalMetadataStore().exists(managedLedgersPath).join()); - - - final String bundleDataPath = "/loadbalance/bundle-data/" + namespace; assertFalse(pulsar.getLocalMetadataStore().exists(bundleDataPath).join()); } @@ -3176,7 +3182,7 @@ public void testFailedUpdatePartitionedTopic() throws Exception { admin.topics().createSubscription(partitionedTopicName + "-partition-" + startPartitions, subName1, MessageId.earliest); fail("Unexpected behaviour"); - } catch (PulsarAdminException.PreconditionFailedException ex) { + } catch (PulsarAdminException.ConflictException ex) { // OK } @@ -3191,6 +3197,32 @@ public void testFailedUpdatePartitionedTopic() 
throws Exception { assertEquals(admin.topics().getPartitionedTopicMetadata(partitionedTopicName).partitions, newPartitions); } + /** + * Validate that the earliest message publish time in backlog is -1 when the subscription has no backlog. + * @throws Exception + */ + @Test + public void testTopicStatsWithEarliestTimeInBacklogIfNoBacklog() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); + final String subscriptionName = "s1"; + admin.topics().createNonPartitionedTopic(topicName); + admin.topics().createSubscription(topicName, subscriptionName, MessageId.earliest); + + // Send one message. + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).enableBatching(false) + .create(); + MessageIdImpl messageId = (MessageIdImpl) producer.send("123"); + // Catch up. + admin.topics().skipAllMessages(topicName, subscriptionName); + // Get topic stats with earliestTimeInBacklog enabled. + TopicStats topicStats = admin.topics().getStats(topicName, false, false, true); + assertEquals(topicStats.getSubscriptions().get(subscriptionName).getEarliestMsgPublishTimeInBacklog(), -1L); + + // cleanup. + producer.close(); + admin.topics().delete(topicName); + } + @Test(dataProvider = "topicType") public void testPartitionedStatsAggregationByProducerName(String topicType) throws Exception { restartClusterIfReused(); @@ -3356,4 +3388,61 @@ private void testSetBacklogQuotasNamespaceLevelIfRetentionExists() throws Except // cleanup. admin.namespaces().deleteNamespace(ns); } + + @Test + private void testAnalyzeSubscriptionBacklogNotCauseStuck() throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp"); + final String subscription = "s1"; + admin.topics().createNonPartitionedTopic(topic); + // Send 10 messages. 
+ Consumer consumer = pulsarClient.newConsumer(Schema.STRING).topic(topic).subscriptionName(subscription) + .receiverQueueSize(0).subscribe(); + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic).create(); + for (int i = 0; i < 10; i++) { + producer.send(i + ""); + } + + // Verify consumer can receive all messages after calling "analyzeSubscriptionBacklog". + admin.topics().analyzeSubscriptionBacklog(topic, subscription, Optional.of(MessageIdImpl.earliest)); + for (int i = 0; i < 10; i++) { + Awaitility.await().untilAsserted(() -> { + Message m = consumer.receive(); + assertNotNull(m); + consumer.acknowledge(m); + }); + } + + // cleanup. + consumer.close(); + producer.close(); + admin.topics().delete(topic); + } + + @Test + public void testGetStatsIfPartitionNotExists() throws Exception { + // create topic. + final String partitionedTp = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp"); + admin.topics().createPartitionedTopic(partitionedTp, 1); + TopicName partition0 = TopicName.get(partitionedTp).getPartition(0); + boolean topicExists1 = pulsar.getBrokerService().getTopic(partition0.toString(), false).join().isPresent(); + assertTrue(topicExists1); + // Verify topics-stats works. + TopicStats topicStats = admin.topics().getStats(partition0.toString()); + assertNotNull(topicStats); + + // Delete partition and call topic-stats again. + admin.topics().delete(partition0.toString()); + boolean topicExists2 = pulsar.getBrokerService().getTopic(partition0.toString(), false).join().isPresent(); + assertFalse(topicExists2); + // Verify: respond 404. + try { + admin.topics().getStats(partition0.toString()); + fail("Should respond 404 after the partition was deleted"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("Topic partitions were not yet created")); + } + + // cleanup. 
+ admin.topics().deletePartitionedTopic(partitionedTp); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java index 3d2a6b934f847..27d72f98c2c49 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java @@ -168,7 +168,7 @@ public Map, Collection>> register(Object callback, Object... c testNamespace, "my-topic", true); } catch (Exception e) { //System.out.println(e.getMessage()); - Assert.assertEquals("Topic not found", e.getMessage()); + Assert.assertTrue(e.getMessage().contains("Topic not found")); } String key = "legendtkl"; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java index a780f889de85f..357422b11f6ce 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java @@ -23,6 +23,7 @@ import static org.testng.Assert.assertTrue; import java.lang.management.ManagementFactory; import java.lang.management.ThreadMXBean; +import java.lang.reflect.Field; import java.time.Duration; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -31,13 +32,21 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import 
org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ProducerBuilderImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.TopicVersion; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.compaction.Compactor; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.springframework.util.CollectionUtils; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -236,4 +245,58 @@ public void testHealthCheckupV2() throws Exception { )) ); } + + class DummyProducerBuilder extends ProducerBuilderImpl { + // This is a dummy producer builder to test the health check timeout + // the producer constructed by this builder will not send any message + public DummyProducerBuilder(PulsarClientImpl client, Schema schema) { + super(client, schema); + } + + @Override + public CompletableFuture> createAsync() { + CompletableFuture> future = new CompletableFuture<>(); + super.createAsync().thenAccept(producer -> { + Producer spyProducer = Mockito.spy(producer); + Mockito.doReturn(CompletableFuture.completedFuture(MessageId.earliest)) + .when(spyProducer).sendAsync(Mockito.any()); + future.complete(spyProducer); + }).exceptionally(ex -> { + future.completeExceptionally(ex); + return null; + }); + return future; + } + } + + @Test + public void testHealthCheckTimeOut() throws Exception { + final String testHealthCheckTopic = String.format("persistent://pulsar/localhost:%s/healthcheck", + pulsar.getConfig().getWebServicePort().get()); + PulsarClient client = pulsar.getClient(); + PulsarClient spyClient = Mockito.spy(client); + Mockito.doReturn(new DummyProducerBuilder<>((PulsarClientImpl) spyClient, Schema.BYTES)) + .when(spyClient).newProducer(Schema.STRING); + // use reflection to replace the client in the broker + Field field 
= PulsarService.class.getDeclaredField("client"); + field.setAccessible(true); + field.set(pulsar, spyClient); + try { + admin.brokers().healthcheck(TopicVersion.V2); + throw new Exception("Should not reach here"); + } catch (PulsarAdminException e) { + log.info("Exception caught", e); + assertTrue(e.getMessage().contains("LowOverheadTimeoutException")); + } + // To ensure we don't have any subscription, the producers and readers are closed. + Awaitility.await().untilAsserted(() -> + assertTrue(CollectionUtils.isEmpty(admin.topics() + .getSubscriptions(testHealthCheckTopic).stream() + // All system topics are using compaction, even though is not explicitly set in the policies. + .filter(v -> !v.equals(Compactor.COMPACTION_SUBSCRIPTION)) + .collect(Collectors.toList()) + )) + ); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java index de4cf9658b201..7c9154a27ff69 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiMultiBrokersTest.java @@ -81,9 +81,8 @@ public void testGetLeaderBroker() assertTrue(leaderBroker.isPresent()); log.info("Leader broker is {}", leaderBroker); for (PulsarAdmin admin : getAllAdmins()) { - String serviceUrl = admin.brokers().getLeaderBroker().getServiceUrl(); - log.info("Pulsar admin get leader broker is {}", serviceUrl); - assertEquals(leaderBroker.get().getServiceUrl(), serviceUrl); + String brokerId = admin.brokers().getLeaderBroker().getBrokerId(); + assertEquals(leaderBroker.get().getBrokerId(), brokerId); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java index 342a409c4ae51..2426be2bee91b 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java @@ -330,10 +330,12 @@ public void clusters() throws Exception { } catch (PulsarAdminException e) { assertTrue(e instanceof PreconditionFailedException); } + + restartBroker(); } @Test - public void clusterNamespaceIsolationPolicies() throws PulsarAdminException { + public void clusterNamespaceIsolationPolicies() throws Exception { try { // create String policyName1 = "policy-1"; @@ -509,6 +511,7 @@ public void clusterNamespaceIsolationPolicies() throws PulsarAdminException { // Ok } + restartBroker(); } @Test @@ -526,7 +529,8 @@ public void brokers() throws Exception { Assert.assertEquals(list2.size(), 1); BrokerInfo leaderBroker = admin.brokers().getLeaderBroker(); - Assert.assertEquals(leaderBroker.getServiceUrl(), pulsar.getLeaderElectionService().getCurrentLeader().map(LeaderBroker::getServiceUrl).get()); + Assert.assertEquals(leaderBroker.getBrokerId(), + pulsar.getLeaderElectionService().getCurrentLeader().map(LeaderBroker::getBrokerId).get()); Map nsMap = admin.brokers().getOwnedNamespaces("test", list.get(0)); // since sla-monitor ns is not created nsMap.size() == 1 (for HeartBeat Namespace) @@ -534,7 +538,7 @@ public void brokers() throws Exception { for (String ns : nsMap.keySet()) { NamespaceOwnershipStatus nsStatus = nsMap.get(ns); if (ns.equals( - NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()) + NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfiguration()) + "/0x00000000_0xffffffff")) { assertEquals(nsStatus.broker_assignment, BrokerAssignment.shared); assertFalse(nsStatus.is_controlled); @@ -542,10 +546,7 @@ public void brokers() throws Exception { } } - String[] parts = list.get(0).split(":"); - Assert.assertEquals(parts.length, 2); - Map nsMap2 = adminTls.brokers().getOwnedNamespaces("test", - String.format("%s:%d", 
parts[0], pulsar.getListenPortHTTPS().get())); + Map nsMap2 = adminTls.brokers().getOwnedNamespaces("test", list.get(0)); Assert.assertEquals(nsMap2.size(), 2); deleteNamespaceWithRetry("prop-xyz/ns1", false); @@ -700,6 +701,10 @@ public void testInvalidDynamicConfigContentInMetadata() throws Exception { Awaitility.await().until(() -> pulsar.getConfiguration().getBrokerShutdownTimeoutMs() == newValue); // verify value is updated assertEquals(pulsar.getConfiguration().getBrokerShutdownTimeoutMs(), newValue); + // reset config + pulsar.getConfiguration().setBrokerShutdownTimeoutMs(0L); + // restart broker + restartBroker(); } /** @@ -798,6 +803,8 @@ public void namespaces() throws Exception { TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test", "usw")); admin.tenants().updateTenant("prop-xyz", tenantInfo); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), Set.of("test", "usw"))); assertEquals(admin.namespaces().getPolicies("prop-xyz/ns1").bundles, PoliciesUtil.defaultBundle()); @@ -930,8 +937,7 @@ public void persistentTopics(String topicName) throws Exception { assertEquals(topicStats.getSubscriptions().get(subName).getConsumers().size(), 1); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 10); assertEquals(topicStats.getPublishers().size(), 0); - assertEquals(topicStats.getOwnerBroker(), - pulsar.getAdvertisedAddress() + ":" + pulsar.getConfiguration().getWebServicePort().get()); + assertEquals(topicStats.getOwnerBroker(), pulsar.getBrokerId()); PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(persistentTopicName, false); assertEquals(internalStats.cursors.keySet(), Set.of(Codec.encode(subName))); @@ -1289,7 +1295,7 @@ public void testGetStats() throws Exception { TopicStats topicStats = admin.topics().getStats(topic, false, false, true); assertEquals(topicStats.getEarliestMsgPublishTimeInBacklogs(), 0); - 
assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), 0); + assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), -1); assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), -1); // publish several messages @@ -1309,7 +1315,7 @@ public void testGetStats() throws Exception { topicStats = admin.topics().getStats(topic, false, true, true); assertEquals(topicStats.getEarliestMsgPublishTimeInBacklogs(), 0); - assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), 0); + assertEquals(topicStats.getSubscriptions().get(subName).getEarliestMsgPublishTimeInBacklog(), -1); assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 0); } @@ -3105,6 +3111,9 @@ public void testTopicBundleRangeLookup() throws PulsarAdminException, PulsarServ TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test", "usw")); admin.tenants().updateTenant("prop-xyz", tenantInfo); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), + tenantInfo.getAllowedClusters())); admin.namespaces().createNamespace("prop-xyz/getBundleNs", 100); assertEquals(admin.namespaces().getPolicies("prop-xyz/getBundleNs").bundles.getNumBundles(), 100); @@ -3298,6 +3307,9 @@ public void testCreateAndDeleteNamespaceWithBundles() throws Exception { TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test", "usw")); admin.tenants().updateTenant("prop-xyz", tenantInfo); + Awaitility.await().untilAsserted(() -> + assertEquals(admin.tenants().getTenantInfo("prop-xyz").getAllowedClusters(), + tenantInfo.getAllowedClusters())); String ns = BrokerTestUtil.newUniqueName("prop-xyz/ns"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java index 
046f2b4cf14c6..25ef8d8aee1fb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java @@ -302,7 +302,7 @@ public void clusters() throws Exception { NamespaceIsolationDataImpl policyData = NamespaceIsolationDataImpl.builder() .namespaces(Collections.singletonList("dummy/colo/ns")) - .primary(Collections.singletonList("localhost" + ":" + pulsar.getListenPortHTTP())) + .primary(Collections.singletonList(pulsar.getAdvertisedAddress())) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) .parameters(parameters1) @@ -722,11 +722,12 @@ public void brokers() throws Exception { assertTrue(res instanceof Set); Set activeBrokers = (Set) res; assertEquals(activeBrokers.size(), 1); - assertEquals(activeBrokers, Set.of(pulsar.getAdvertisedAddress() + ":" + pulsar.getListenPortHTTP().get())); + assertEquals(activeBrokers, Set.of(pulsar.getBrokerId())); Object leaderBrokerRes = asyncRequests(ctx -> brokers.getLeaderBroker(ctx)); assertTrue(leaderBrokerRes instanceof BrokerInfo); BrokerInfo leaderBroker = (BrokerInfo)leaderBrokerRes; - assertEquals(leaderBroker.getServiceUrl(), pulsar.getLeaderElectionService().getCurrentLeader().map(LeaderBroker::getServiceUrl).get()); + assertEquals(leaderBroker.getBrokerId(), + pulsar.getLeaderElectionService().getCurrentLeader().map(LeaderBroker::getBrokerId).get()); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java index 64b2a58ab86e8..f8aa3dc355d92 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AnalyzeBacklogSubscriptionTest.java @@ -154,17 +154,17 @@ private void verifyBacklog(String topic, String 
subscription, int numEntries, in AnalyzeSubscriptionBacklogResult analyzeSubscriptionBacklogResult = admin.topics().analyzeSubscriptionBacklog(topic, subscription, Optional.empty()); - assertEquals(numEntries, analyzeSubscriptionBacklogResult.getEntries()); - assertEquals(numEntries, analyzeSubscriptionBacklogResult.getFilterAcceptedEntries()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRejectedEntries()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRescheduledEntries()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRescheduledEntries()); + assertEquals(analyzeSubscriptionBacklogResult.getEntries(), numEntries); + assertEquals(analyzeSubscriptionBacklogResult.getFilterAcceptedEntries(), numEntries); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRejectedEntries(), 0); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRescheduledEntries(), 0); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRescheduledEntries(), 0); - assertEquals(numMessages, analyzeSubscriptionBacklogResult.getMessages()); - assertEquals(numMessages, analyzeSubscriptionBacklogResult.getFilterAcceptedMessages()); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRejectedMessages()); + assertEquals(analyzeSubscriptionBacklogResult.getMessages(), numMessages); + assertEquals(analyzeSubscriptionBacklogResult.getFilterAcceptedMessages(), numMessages); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRejectedMessages(), 0); - assertEquals(0, analyzeSubscriptionBacklogResult.getFilterRescheduledMessages()); + assertEquals(analyzeSubscriptionBacklogResult.getFilterRescheduledMessages(), 0); assertFalse(analyzeSubscriptionBacklogResult.isAborted()); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java index 5644be406a7ee..53b170d52e6d3 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java @@ -32,6 +32,7 @@ import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import java.lang.reflect.Field; @@ -1294,6 +1295,14 @@ public void testForceDeleteNamespace() throws Exception { pulsar.getConfiguration().setForceDeleteNamespaceAllowed(false); } + @Test + public void testSetNamespaceReplicationCluters() throws Exception { + String namespace = BrokerTestUtil.newUniqueName(this.testTenant + "/namespace"); + admin.namespaces().createNamespace(namespace, 100); + assertThrows(PulsarAdminException.PreconditionFailedException.class, + () -> admin.namespaces().setNamespaceReplicationClusters(namespace, Set.of())); + } + @Test public void testForceDeleteNamespaceNotAllowed() throws Exception { assertFalse(pulsar.getConfiguration().isForceDeleteNamespaceAllowed()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java index a4f6bd4650f7c..1f755234009ba 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java @@ -31,6 +31,8 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertSame; import static org.testng.Assert.assertTrue; import java.lang.reflect.Field; import java.util.ArrayList; @@ -52,9 +54,9 @@ import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import 
org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.broker.admin.v2.ExtPersistentTopics; import org.apache.pulsar.broker.admin.v2.NonPersistentTopics; import org.apache.pulsar.broker.admin.v2.PersistentTopics; -import org.apache.pulsar.broker.admin.v2.ExtPersistentTopics; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.authentication.AuthenticationDataHttps; import org.apache.pulsar.broker.namespace.NamespaceService; @@ -65,6 +67,7 @@ import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.web.PulsarWebResource; import org.apache.pulsar.broker.web.RestException; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.admin.Topics; @@ -87,6 +90,7 @@ import org.apache.pulsar.common.partition.PartitionedTopicMetadata; import org.apache.pulsar.common.policies.data.AuthAction; import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; @@ -127,6 +131,7 @@ public void initPersistentTopics() throws Exception { @Override @BeforeMethod protected void setup() throws Exception { + conf.setTopicLevelPoliciesEnabled(false); super.internalSetup(); persistentTopics = spy(PersistentTopics.class); persistentTopics.setServletContext(new MockServletContext()); @@ -1357,21 +1362,12 @@ public void testGetMessageById() throws Exception { Message message2 = admin.topics().getMessageById(topicName2, id2.getLedgerId(), id2.getEntryId()); Assert.assertEquals(message2.getData(), data2.getBytes()); - Message message3 = null; - try { - message3 = admin.topics().getMessageById(topicName2, 
id1.getLedgerId(), id1.getEntryId()); - Assert.fail(); - } catch (Exception e) { - Assert.assertNull(message3); - } - - Message message4 = null; - try { - message4 = admin.topics().getMessageById(topicName1, id2.getLedgerId(), id2.getEntryId()); - Assert.fail(); - } catch (Exception e) { - Assert.assertNull(message4); - } + Assert.expectThrows(PulsarAdminException.NotFoundException.class, () -> { + admin.topics().getMessageById(topicName2, id1.getLedgerId(), id1.getEntryId()); + }); + Assert.expectThrows(PulsarAdminException.NotFoundException.class, () -> { + admin.topics().getMessageById(topicName1, id2.getLedgerId(), id2.getEntryId()); + }); } @Test @@ -1452,6 +1448,69 @@ public void onSendAcknowledgement(Producer producer, Message message, MessageId .compareTo(id2) > 0); } + @Test + public void testGetMessageIdByTimestampWithCompaction() throws Exception { + TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); + admin.tenants().createTenant("tenant-xyz", tenantInfo); + admin.namespaces().createNamespace("tenant-xyz/ns-abc", Set.of("test")); + final String topicName = "persistent://tenant-xyz/ns-abc/testGetMessageIdByTimestampWithCompaction"; + admin.topics().createNonPartitionedTopic(topicName); + + Map publishTimeMap = new ConcurrentHashMap<>(); + @Cleanup + ProducerBase producer = (ProducerBase) pulsarClient.newProducer().topic(topicName) + .enableBatching(false) + .intercept(new ProducerInterceptor() { + @Override + public void close() { + + } + + @Override + public boolean eligible(Message message) { + return true; + } + + @Override + public Message beforeSend(Producer producer, Message message) { + return message; + } + + @Override + public void onSendAcknowledgement(Producer producer, Message message, MessageId msgId, + Throwable exception) { + publishTimeMap.put(message.getMessageId(), message.getPublishTime()); + } + }) + .create(); + + MessageId id1 = producer.newMessage().key("K1").value("test1".getBytes()).send(); 
+ MessageId id2 = producer.newMessage().key("K2").value("test2".getBytes()).send(); + + long publish1 = publishTimeMap.get(id1); + long publish2 = publishTimeMap.get(id2); + Assert.assertTrue(publish1 < publish2); + + admin.topics().triggerCompaction(topicName); + Awaitility.await().untilAsserted(() -> + assertSame(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS)); + + admin.topics().unload(topicName); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topicName, false); + assertEquals(internalStats.ledgers.size(), 1); + assertEquals(internalStats.ledgers.get(0).entries, 0); + }); + + Assert.assertEquals(admin.topics().getMessageIdByTimestamp(topicName, publish1 - 1), id1); + Assert.assertEquals(admin.topics().getMessageIdByTimestamp(topicName, publish1), id1); + Assert.assertEquals(admin.topics().getMessageIdByTimestamp(topicName, publish1 + 1), id2); + Assert.assertEquals(admin.topics().getMessageIdByTimestamp(topicName, publish2), id2); + Assert.assertTrue(admin.topics().getMessageIdByTimestamp(topicName, publish2 + 1) + .compareTo(id2) > 0); + } + @Test public void testGetBatchMessageIdByTimestamp() throws Exception { TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java index d447787d1b7cb..a75ae78cef393 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicAutoCreationTest.java @@ -27,7 +27,10 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; 
+import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -40,6 +43,7 @@ import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; +import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; import org.apache.pulsar.common.policies.data.TopicType; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -55,6 +59,7 @@ protected void setup() throws Exception { conf.setAllowAutoTopicCreationType(TopicType.PARTITIONED); conf.setAllowAutoTopicCreation(true); conf.setDefaultNumPartitions(3); + conf.setForceDeleteNamespaceAllowed(true); super.internalSetup(); super.producerBaseSetup(); } @@ -135,6 +140,8 @@ public void testPartitionedTopicAutoCreationForbiddenDuringNamespaceDeletion() new InetSocketAddress(pulsar.getAdvertisedAddress(), pulsar.getBrokerListenPort().get()); return CompletableFuture.completedFuture(Pair.of(brokerAddress, brokerAddress)); }); + final String topicPoliciesServiceInitException + = "Topic creation encountered an exception by initialize topic policies service"; // Creating a producer and creating a Consumer may trigger automatic topic // creation, let's try to create a Producer and a Consumer @@ -142,20 +149,24 @@ public void testPartitionedTopicAutoCreationForbiddenDuringNamespaceDeletion() .sendTimeout(1, TimeUnit.SECONDS) .topic(topic) .create()) { - } catch (PulsarClientException.LookupException expected) { - String msg = "Namespace bundle for topic (%s) not served by this instance"; + } catch (PulsarClientException.TopicDoesNotExistException expected) { + // Since the "policies.deleted" is "true", the value of "isAllowAutoTopicCreationAsync" will be false, + // so the "TopicDoesNotExistException" is expected. 
log.info("Expected error", expected); - assertTrue(expected.getMessage().contains(String.format(msg, topic))); + assertTrue(expected.getMessage().contains(topic) + || expected.getMessage().contains(topicPoliciesServiceInitException)); } try (Consumer ignored = pulsarClient.newConsumer() .topic(topic) .subscriptionName("test") .subscribe()) { - } catch (PulsarClientException.LookupException expected) { - String msg = "Namespace bundle for topic (%s) not served by this instance"; + } catch (PulsarClientException.TopicDoesNotExistException expected) { + // Since the "policies.deleted" is "true", the value of "isAllowAutoTopicCreationAsync" will be false, + // so the "TopicDoesNotExistException" is expected. log.info("Expected error", expected); - assertTrue(expected.getMessage().contains(String.format(msg, topic))); + assertTrue(expected.getMessage().contains(topic) + || expected.getMessage().contains(topicPoliciesServiceInitException)); } @@ -182,4 +193,56 @@ public void testPartitionedTopicAutoCreationForbiddenDuringNamespaceDeletion() } } + + @Test + public void testClientWithAutoCreationGotNotFoundException() throws PulsarAdminException, PulsarClientException { + final String namespace = "public/test_1"; + final String topicName = "persistent://public/test_1/test_auto_creation_got_not_found" + + System.currentTimeMillis(); + final int retryTimes = 30; + admin.namespaces().createNamespace(namespace); + admin.namespaces().setAutoTopicCreation(namespace, AutoTopicCreationOverride.builder() + .allowAutoTopicCreation(true) + .topicType("non-partitioned") + .build()); + + @Cleanup("shutdown") + final ExecutorService executor1 = Executors.newSingleThreadExecutor(); + + @Cleanup("shutdown") + final ExecutorService executor2 = Executors.newSingleThreadExecutor(); + + for (int i = 0; i < retryTimes; i++) { + final CompletableFuture adminListSub = CompletableFuture.runAsync(() -> { + try { + admin.topics().getSubscriptions(topicName); + } catch (PulsarAdminException e) { + 
throw new RuntimeException(e); + } + }, executor1); + + final CompletableFuture> consumerSub = CompletableFuture.supplyAsync(() -> { + try { + return pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName("sub-1") + .subscribe(); + } catch (PulsarClientException e) { + throw new RuntimeException(e); + } + }, executor2); + + try { + adminListSub.join(); + } catch (Throwable ex) { + // we don't care the exception. + } + + consumerSub.join().close(); + admin.topics().delete(topicName, true); + } + + admin.namespaces().deleteNamespace(namespace, true); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesAuthZTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesAuthZTest.java new file mode 100644 index 0000000000000..f07b9a6c2aabf --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesAuthZTest.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.admin; + +import io.jsonwebtoken.Jwts; +import java.util.Set; +import java.util.UUID; +import lombok.Cleanup; +import lombok.SneakyThrows; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.policies.data.AuthAction; +import org.apache.pulsar.common.policies.data.RetentionPolicies; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.security.MockedPulsarStandalone; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import static org.awaitility.Awaitility.await; + + +public final class TopicPoliciesAuthZTest extends MockedPulsarStandalone { + + private PulsarAdmin superUserAdmin; + + private PulsarAdmin tenantManagerAdmin; + + private static final String TENANT_ADMIN_SUBJECT = UUID.randomUUID().toString(); + private static final String TENANT_ADMIN_TOKEN = Jwts.builder() + .claim("sub", TENANT_ADMIN_SUBJECT).signWith(SECRET_KEY).compact(); + + @SneakyThrows + @BeforeClass + public void before() { + configureTokenAuthentication(); + configureDefaultAuthorization(); + start(); + this.superUserAdmin =PulsarAdmin.builder() + .serviceHttpUrl(getPulsarService().getWebServiceAddress()) + .authentication(new AuthenticationToken(SUPER_USER_TOKEN)) + .build(); + final TenantInfo tenantInfo = superUserAdmin.tenants().getTenantInfo("public"); + tenantInfo.getAdminRoles().add(TENANT_ADMIN_SUBJECT); + superUserAdmin.tenants().updateTenant("public", tenantInfo); + this.tenantManagerAdmin = PulsarAdmin.builder() + .serviceHttpUrl(getPulsarService().getWebServiceAddress()) + .authentication(new AuthenticationToken(TENANT_ADMIN_TOKEN)) + .build(); + } + + + @SneakyThrows + @AfterClass + public void after() { + close(); + } + + + @SneakyThrows + @Test + 
public void testRetention() { + final String random = UUID.randomUUID().toString(); + final String topic = "persistent://public/default/" + random; + final String subject = UUID.randomUUID().toString(); + final String token = Jwts.builder() + .claim("sub", subject).signWith(SECRET_KEY).compact(); + superUserAdmin.topics().createNonPartitionedTopic(topic); + + @Cleanup + final PulsarAdmin subAdmin = PulsarAdmin.builder() + .serviceHttpUrl(getPulsarService().getWebServiceAddress()) + .authentication(new AuthenticationToken(token)) + .build(); + final RetentionPolicies definedRetentionPolicy = new RetentionPolicies(1, 1); + // test superuser + superUserAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + + // because the topic policies is eventual consistency, we should wait here + await().untilAsserted(() -> { + final RetentionPolicies receivedRetentionPolicy = superUserAdmin.topicPolicies().getRetention(topic); + Assert.assertEquals(receivedRetentionPolicy, definedRetentionPolicy); + }); + superUserAdmin.topicPolicies().removeRetention(topic); + + await().untilAsserted(() -> { + final RetentionPolicies retention = superUserAdmin.topicPolicies().getRetention(topic); + Assert.assertNull(retention); + }); + + // test tenant manager + + tenantManagerAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + await().untilAsserted(() -> { + final RetentionPolicies receivedRetentionPolicy = tenantManagerAdmin.topicPolicies().getRetention(topic); + Assert.assertEquals(receivedRetentionPolicy, definedRetentionPolicy); + }); + tenantManagerAdmin.topicPolicies().removeRetention(topic); + await().untilAsserted(() -> { + final RetentionPolicies retention = tenantManagerAdmin.topicPolicies().getRetention(topic); + Assert.assertNull(retention); + }); + + // test nobody + + try { + subAdmin.topicPolicies().getRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof 
PulsarAdminException.NotAuthorizedException); + } + + try { + + subAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + try { + subAdmin.topicPolicies().removeRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + // test sub user with permissions + for (AuthAction action : AuthAction.values()) { + superUserAdmin.namespaces().grantPermissionOnNamespace("public/default", + subject, Set.of(action)); + try { + subAdmin.topicPolicies().getRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + try { + + subAdmin.topicPolicies().setRetention(topic, definedRetentionPolicy); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + + try { + subAdmin.topicPolicies().removeRetention(topic); + Assert.fail("unexpected behaviour"); + } catch (PulsarAdminException ex) { + Assert.assertTrue(ex instanceof PulsarAdminException.NotAuthorizedException); + } + superUserAdmin.namespaces().revokePermissionsOnNamespace("public/default", subject); + } + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java index 87471f4972f8d..c5111b47d2082 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java @@ -23,6 +23,7 @@ import static org.testng.Assert.assertNotEquals; import static 
org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import java.lang.reflect.Field; @@ -41,6 +42,8 @@ import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.ConfigHelper; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.namespace.NamespaceService; @@ -49,6 +52,7 @@ import org.apache.pulsar.broker.service.SystemTopicBasedTopicPoliciesService; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.persistent.SubscribeRateLimiter; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -82,8 +86,11 @@ import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicStats; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.assertj.core.api.Assertions; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -176,11 +183,11 @@ public void testTopicPolicyInitialValueWithNamespaceAlreadyLoaded() throws Excep //load the nameserver, but topic is not init. 
log.info("lookup:{}",admin.lookups().lookupTopic(topic)); - assertTrue(pulsar.getBrokerService().isTopicNsOwnedByBroker(topicName)); + assertTrue(pulsar.getBrokerService().isTopicNsOwnedByBrokerAsync(topicName).join()); assertFalse(pulsar.getBrokerService().getTopics().containsKey(topic)); //make sure namespace policy reader is fully started. Awaitility.await().untilAsserted(()-> { - assertTrue(policyService.getPoliciesCacheInit(topicName.getNamespaceObject())); + assertTrue(policyService.getPoliciesCacheInit(topicName.getNamespaceObject()).isDone()); }); //load the topic. @@ -2990,6 +2997,10 @@ public void testReplicatorClusterApi() throws Exception { admin.topics().removeReplicationClusters(topic); Awaitility.await().untilAsserted(() -> assertNull(admin.topics().getReplicationClusters(topic, false))); + + assertThrows(PulsarAdminException.PreconditionFailedException.class, () -> admin.topics().setReplicationClusters(topic, List.of())); + assertThrows(PulsarAdminException.PreconditionFailedException.class, () -> admin.topics().setReplicationClusters(topic, null)); + } @Test @@ -3156,4 +3167,49 @@ public void testProduceChangesWithEncryptionRequired() throws Exception { }); } + @Test + public void testUpdateRetentionWithPartialFailure() throws Exception { + String tpName = BrokerTestUtil.newUniqueName("persistent://" + myNamespace + "/tp"); + admin.topics().createNonPartitionedTopic(tpName); + + // Load topic up. + admin.topics().getInternalStats(tpName); + + // Inject an error that makes dispatch rate update fail. 
+ PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(tpName, false).join().get(); + ConcurrentOpenHashMap subscriptions = + WhiteboxImpl.getInternalState(persistentTopic, "subscriptions"); + PersistentSubscription mockedSubscription = Mockito.mock(PersistentSubscription.class); + Mockito.when(mockedSubscription.getDispatcher()).thenThrow(new RuntimeException("Mocked error: getDispatcher")); + subscriptions.put("mockedSubscription", mockedSubscription); + + // Update namespace-level retention policies. + RetentionPolicies retentionPolicies1 = new RetentionPolicies(1, 1); + admin.namespaces().setRetentionAsync(myNamespace, retentionPolicies1); + + // Verify: update retention will be success even if other component update throws exception. + Awaitility.await().untilAsserted(() -> { + ManagedLedgerImpl ML = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + assertEquals(ML.getConfig().getRetentionSizeInMB(), 1); + assertEquals(ML.getConfig().getRetentionTimeMillis(), 1 * 60 * 1000); + }); + + // Update topic-level retention policies. + RetentionPolicies retentionPolicies2 = new RetentionPolicies(2, 2); + admin.topics().setRetentionAsync(tpName, retentionPolicies2); + + // Verify: update retention will be success even if other component update throws exception. + Awaitility.await().untilAsserted(() -> { + ManagedLedgerImpl ML = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); + assertEquals(ML.getConfig().getRetentionSizeInMB(), 2); + assertEquals(ML.getConfig().getRetentionTimeMillis(), 2 * 60 * 1000); + }); + + // Cleanup. 
+ subscriptions.clear(); + admin.namespaces().removeRetention(myNamespace); + admin.topics().delete(tpName, false); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesWithBrokerRestartTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesWithBrokerRestartTest.java new file mode 100644 index 0000000000000..672fc2c95f890 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesWithBrokerRestartTest.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.admin; + +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.common.policies.data.RetentionPolicies; +import org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.TimeUnit; + +@Slf4j +@Test(groups = "broker-admin") +public class TopicPoliciesWithBrokerRestartTest extends MockedPulsarServiceBaseTest { + + @Override + @BeforeClass(alwaysRun = true) + protected void setup() throws Exception { + super.internalSetup(); + setupDefaultTenantAndNamespace(); + } + + @Override + @AfterClass(alwaysRun = true) + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + + @Test + public void testRetentionWithBrokerRestart() throws Exception { + final int messages = 1_000; + final int topicNum = 500; + // (1) Init topic + admin.namespaces().createNamespace("public/retention"); + final String topicName = "persistent://public/retention/retention_with_broker_restart"; + admin.topics().createNonPartitionedTopic(topicName); + for (int i = 0; i < topicNum; i++) { + final String shadowTopicNames = topicName + "_" + i; + admin.topics().createNonPartitionedTopic(shadowTopicNames); + } + // (2) Set retention + final RetentionPolicies retentionPolicies = new RetentionPolicies(20, 20); + for (int i = 0; i < topicNum; i++) { + final String shadowTopicNames = topicName + "_" + i; + admin.topicPolicies().setRetention(shadowTopicNames, retentionPolicies); + } + admin.topicPolicies().setRetention(topicName, retentionPolicies); + // (3) Send messages + @Cleanup + final 
Producer publisher = pulsarClient.newProducer() + .topic(topicName) + .create(); + for (int i = 0; i < messages; i++) { + publisher.send((i + "").getBytes(StandardCharsets.UTF_8)); + } + // (4) Check configuration + Awaitility.await().untilAsserted(() -> { + final PersistentTopic persistentTopic1 = (PersistentTopic) + pulsar.getBrokerService().getTopic(topicName, true).join().get(); + final ManagedLedgerImpl managedLedger1 = (ManagedLedgerImpl) persistentTopic1.getManagedLedger(); + Assert.assertEquals(managedLedger1.getConfig().getRetentionSizeInMB(), 20); + Assert.assertEquals(managedLedger1.getConfig().getRetentionTimeMillis(), + TimeUnit.MINUTES.toMillis(20)); + }); + // (5) Restart broker + restartBroker(); + // (6) Check configuration again + for (int i = 0; i < topicNum; i++) { + final String shadowTopicNames = topicName + "_" + i; + admin.lookups().lookupTopic(shadowTopicNames); + final PersistentTopic persistentTopicTmp = (PersistentTopic) + pulsar.getBrokerService().getTopic(shadowTopicNames, true).join().get(); + final ManagedLedgerImpl managedLedgerTemp = (ManagedLedgerImpl) persistentTopicTmp.getManagedLedger(); + Assert.assertEquals(managedLedgerTemp.getConfig().getRetentionSizeInMB(), 20); + Assert.assertEquals(managedLedgerTemp.getConfig().getRetentionTimeMillis(), + TimeUnit.MINUTES.toMillis(20)); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java index efd8b66d754ac..234af7afa8d09 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java @@ -84,6 +84,8 @@ protected void setup() throws Exception { Set providers = new HashSet<>(); providers.add(AuthenticationProviderToken.class.getName()); conf.setAuthenticationProviders(providers); + 
conf.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + conf.setBrokerClientAuthenticationParameters("token:" + ADMIN_TOKEN); super.internalSetup(); PulsarAdminBuilder pulsarAdminBuilder = PulsarAdmin.builder().serviceHttpUrl(brokerUrl != null ? brokerUrl.toString() : brokerUrlTls.toString()) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsWithoutTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsWithoutTlsTest.java new file mode 100644 index 0000000000000..88bf2f8f42108 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsWithoutTlsTest.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.admin; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import lombok.Cleanup; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.authentication.AuthenticationDataHttps; +import org.apache.pulsar.broker.rest.Topics; +import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.naming.TopicDomain; +import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.websocket.data.ProducerMessage; +import org.apache.pulsar.websocket.data.ProducerMessages; +import org.mockito.ArgumentCaptor; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import javax.ws.rs.container.AsyncResponse; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.net.URI; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + +public class TopicsWithoutTlsTest extends MockedPulsarServiceBaseTest { + + private Topics topics; + private final String testLocalCluster = "test"; + private final String testTenant = "my-tenant"; + private final String testNamespace = "my-namespace"; + private final String testTopicName = "my-topic"; + + @Override + @BeforeMethod + protected void setup() throws Exception { + super.internalSetup(); + topics = spy(new Topics()); + 
topics.setPulsar(pulsar); + doReturn(TopicDomain.persistent.value()).when(topics).domain(); + doReturn("test-app").when(topics).clientAppId(); + doReturn(mock(AuthenticationDataHttps.class)).when(topics).clientAuthData(); + admin.clusters().createCluster(testLocalCluster, new ClusterDataImpl()); + admin.tenants().createTenant(testTenant, new TenantInfoImpl(Set.of("role1", "role2"), Set.of(testLocalCluster))); + admin.namespaces().createNamespace(testTenant + "/" + testNamespace, + Set.of(testLocalCluster)); + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + this.conf.setBrokerServicePortTls(Optional.empty()); + this.conf.setWebServicePortTls(Optional.empty()); + } + + @Override + @AfterMethod + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testLookUpWithRedirect() throws Exception { + String topicName = "persistent://" + testTenant + "/" + testNamespace + "/" + testTopicName; + URI requestPath = URI.create(pulsar.getWebServiceAddress() + "/topics/my-tenant/my-namespace/my-topic"); + //create topic on one broker + admin.topics().createNonPartitionedTopic(topicName); + conf.setBrokerServicePort(Optional.of(0)); + conf.setWebServicePort(Optional.of(0)); + @Cleanup + PulsarTestContext pulsarTestContext2 = createAdditionalPulsarTestContext(conf); + PulsarService pulsar2 = pulsarTestContext2.getPulsarService(); + doReturn(false).when(topics).isRequestHttps(); + UriInfo uriInfo = mock(UriInfo.class); + doReturn(requestPath).when(uriInfo).getRequestUri(); + FieldUtils.writeField(topics, "uri", uriInfo, true); + //do produce on another broker + topics.setPulsar(pulsar2); + AsyncResponse asyncResponse = mock(AsyncResponse.class); + ProducerMessages producerMessages = new ProducerMessages(); + producerMessages.setValueSchema(ObjectMapperFactory.getMapper().getObjectMapper(). 
+ writeValueAsString(Schema.INT64.getSchemaInfo())); + String message = "[]"; + producerMessages.setMessages(createMessages(message)); + topics.produceOnPersistentTopic(asyncResponse, testTenant, testNamespace, testTopicName, false, producerMessages); + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(Response.class); + verify(asyncResponse, timeout(5000).times(1)).resume(responseCaptor.capture()); + // Verify got redirect response + Assert.assertEquals(responseCaptor.getValue().getStatusInfo(), Response.Status.TEMPORARY_REDIRECT); + // Verify URI point to address of broker the topic was created on + Assert.assertEquals(responseCaptor.getValue().getLocation().toString(), requestPath.toString()); + } + + private static List createMessages(String message) throws JsonProcessingException { + return ObjectMapperFactory.getMapper().reader() + .forType(new TypeReference>() { + }).readValue(message); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java index ab83c8fec032e..43ec01f47f614 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java @@ -450,7 +450,7 @@ public void brokers() throws Exception { for (String ns : nsMap.keySet()) { NamespaceOwnershipStatus nsStatus = nsMap.get(ns); if (ns.equals( - NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()) + NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfiguration()) + "/0x00000000_0xffffffff")) { assertEquals(nsStatus.broker_assignment, BrokerAssignment.shared); assertFalse(nsStatus.is_controlled); @@ -458,10 +458,7 @@ public void brokers() throws Exception { } } - String[] parts = list.get(0).split(":"); - Assert.assertEquals(parts.length, 2); - Map nsMap2 = 
adminTls.brokers().getOwnedNamespaces("use", - String.format("%s:%d", parts[0], pulsar.getListenPortHTTPS().get())); + Map nsMap2 = adminTls.brokers().getOwnedNamespaces("use", list.get(0)); Assert.assertEquals(nsMap2.size(), 2); admin.namespaces().deleteNamespace("prop-xyz/use/ns1"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java index 1e5f4679492ad..049fd0f5f4400 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java @@ -163,7 +163,7 @@ public void testGetTransactionInBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -173,7 +173,7 @@ public void testGetTransactionInBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).sendTimeout(0, TimeUnit.SECONDS).create(); @@ -208,7 +208,7 @@ public void testGetTransactionInPendingAckStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); 
PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -219,7 +219,7 @@ public void testGetTransactionInPendingAckStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).create(); @@ -334,7 +334,7 @@ public void testGetTransactionBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -344,7 +344,7 @@ public void testGetTransactionBufferStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES) @@ -392,7 +392,7 @@ public void testGetPendingAckStats(String ackType) throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof 
PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -402,7 +402,7 @@ public void testGetPendingAckStats(String ackType) throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); @@ -541,7 +541,7 @@ public void testGetPendingAckInternalStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } try { pulsar.getBrokerService().getTopic(topic, false); @@ -551,7 +551,7 @@ public void testGetPendingAckInternalStats() throws Exception { } catch (ExecutionException ex) { assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); - assertEquals(cause.getMessage(), "Topic not found"); + assertTrue(cause.getMessage().contains("Topic not found")); } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).create(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java index 
6ffcecbeb9f8b..942a42fa7aaa1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthLogsTest.java @@ -60,6 +60,8 @@ public void setup() throws Exception { conf.setAuthorizationEnabled(true); conf.setAuthorizationAllowWildcardsMatching(true); conf.setSuperUserRoles(Sets.newHashSet("super")); + conf.setBrokerClientAuthenticationPlugin(MockAuthentication.class.getName()); + conf.setBrokerClientAuthenticationParameters("user:pass.pass"); internalSetup(); try (PulsarAdmin admin = PulsarAdmin.builder() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java index 0b1726617f71f..25ac59796b02c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockAuthentication.java @@ -29,7 +29,10 @@ public class MockAuthentication implements Authentication { private static final Logger log = LoggerFactory.getLogger(MockAuthentication.class); - private final String user; + private String user; + + public MockAuthentication() { + } public MockAuthentication(String user) { this.user = user; @@ -67,6 +70,7 @@ public String getCommandData() { @Override public void configure(Map authParams) { + this.user = authParams.get("user"); } @Override diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java index c32d3fc3b0b27..28d66af8b265b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java @@ -19,8 +19,10 @@ package org.apache.pulsar.broker.auth; import static 
org.apache.pulsar.broker.BrokerTestUtil.spyWithoutRecordingInvocations; +import static org.testng.Assert.assertEquals; import com.google.common.collect.Sets; import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; @@ -37,10 +39,14 @@ import java.util.function.Predicate; import javax.ws.rs.container.AsyncResponse; import javax.ws.rs.container.TimeoutHandler; +import lombok.AllArgsConstructor; +import lombok.Data; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminBuilder; @@ -48,6 +54,11 @@ import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.impl.auth.AuthenticationDisabled; +import org.apache.pulsar.client.impl.auth.AuthenticationTls; +import org.apache.pulsar.client.impl.ConnectionPool; +import org.apache.pulsar.client.impl.ProducerImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantInfoImpl; @@ -56,6 +67,7 @@ import org.apache.pulsar.utils.ResourceUtils; import org.apache.zookeeper.MockZooKeeper; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.mockito.Mockito; import org.mockito.internal.util.MockUtil; import org.slf4j.Logger; @@ -221,16 +233,30 @@ protected void 
doInitConf() throws Exception { this.conf.setBrokerShutdownTimeoutMs(0L); this.conf.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); this.conf.setBrokerServicePort(Optional.of(0)); - this.conf.setBrokerServicePortTls(Optional.of(0)); this.conf.setAdvertisedAddress("localhost"); this.conf.setWebServicePort(Optional.of(0)); - this.conf.setWebServicePortTls(Optional.of(0)); this.conf.setNumExecutorThreadPoolSize(5); this.conf.setExposeBundlesMetricsInPrometheus(true); } protected final void init() throws Exception { doInitConf(); + // trying to config the broker internal client + if (conf.getWebServicePortTls().isPresent() + && conf.getAuthenticationProviders().contains(AuthenticationProviderTls.class.getName()) + && !conf.isTlsEnabledWithKeyStore()) { + // enabled TLS + if (conf.getBrokerClientAuthenticationPlugin() == null + || conf.getBrokerClientAuthenticationPlugin().equals(AuthenticationDisabled.class.getName())) { + conf.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); + conf.setBrokerClientAuthenticationParameters("tlsCertFile:" + BROKER_CERT_FILE_PATH + + ",tlsKeyFile:" + BROKER_KEY_FILE_PATH); + conf.setBrokerClientTlsEnabled(true); + conf.setBrokerClientTrustCertsFilePath(CA_CERT_FILE_PATH); + conf.setBrokerClientCertificateFilePath(BROKER_CERT_FILE_PATH); + conf.setBrokerClientKeyFilePath(BROKER_KEY_FILE_PATH); + } + } startBroker(); } @@ -464,9 +490,7 @@ protected ServiceConfiguration getDefaultConf() { configuration.setBrokerShutdownTimeoutMs(0L); configuration.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); configuration.setBrokerServicePort(Optional.of(0)); - configuration.setBrokerServicePortTls(Optional.of(0)); configuration.setWebServicePort(Optional.of(0)); - configuration.setWebServicePortTls(Optional.of(0)); configuration.setBookkeeperClientExposeStatsToPrometheus(true); configuration.setNumExecutorThreadPoolSize(5); configuration.setBrokerMaxConnections(0); @@ -644,5 +668,43 @@ public Object[][] 
incorrectPersistentPolicies() { }; } + protected ServiceProducer getServiceProducer(ProducerImpl clientProducer, String topicName) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).join().get(); + org.apache.pulsar.broker.service.Producer serviceProducer = + persistentTopic.getProducers().get(clientProducer.getProducerName()); + long clientProducerId = WhiteboxImpl.getInternalState(clientProducer, "producerId"); + assertEquals(serviceProducer.getProducerId(), clientProducerId); + assertEquals(serviceProducer.getEpoch(), clientProducer.getConnectionHandler().getEpoch()); + return new ServiceProducer(serviceProducer, persistentTopic); + } + + @Data + @AllArgsConstructor + public static class ServiceProducer { + private org.apache.pulsar.broker.service.Producer serviceProducer; + private PersistentTopic persistentTopic; + } + + protected void sleepSeconds(int seconds){ + try { + Thread.sleep(1000 * seconds); + } catch (InterruptedException e) { + log.warn("This thread has been interrupted", e); + Thread.currentThread().interrupt(); + } + } + + private static void reconnectAllConnections(PulsarClientImpl c) throws Exception { + ConnectionPool pool = c.getCnxPool(); + Method closeAllConnections = ConnectionPool.class.getDeclaredMethod("closeAllConnections", new Class[]{}); + closeAllConnections.setAccessible(true); + closeAllConnections.invoke(pool, new Object[]{}); + } + + protected void reconnectAllConnections() throws Exception { + reconnectAllConnections((PulsarClientImpl) pulsarClient); + } + private static final Logger log = LoggerFactory.getLogger(MockedPulsarServiceBaseTest.class); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersMultiBrokerLeaderElectionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersMultiBrokerLeaderElectionTest.java new file mode 100644 index 0000000000000..5adc78b2c5212 --- /dev/null 
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersMultiBrokerLeaderElectionTest.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.loadbalance; + +import java.util.Optional; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class AdvertisedListenersMultiBrokerLeaderElectionTest extends MultiBrokerLeaderElectionTest { + @Override + protected PulsarTestContext.Builder createPulsarTestContextBuilder(ServiceConfiguration conf) { + conf.setWebServicePortTls(Optional.of(0)); + return super.createPulsarTestContextBuilder(conf).preallocatePorts(true).configOverride(config -> { + // use advertised address that is different than the name used in the advertised listeners + config.setAdvertisedAddress("localhost"); + config.setAdvertisedListeners( + "public_pulsar:pulsar://127.0.0.1:" + config.getBrokerServicePort().get() + + ",public_http:http://127.0.0.1:" + config.getWebServicePort().get() + + ",public_https:https://127.0.0.1:" + 
config.getWebServicePortTls().get()); + }); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java index 7a8154312e4dc..a88ccd60ae4c4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java @@ -78,7 +78,6 @@ private void updateConfig(ServiceConfiguration conf, String advertisedAddress) { ",public_https:https://localhost:" + httpsPort); conf.setBrokerServicePort(Optional.of(pulsarPort)); conf.setWebServicePort(Optional.of(httpPort)); - conf.setWebServicePortTls(Optional.of(httpsPort)); } @Test @@ -101,7 +100,6 @@ public void testLookup() throws Exception { assertEquals(new URI(ld.getBrokerUrl()).getHost(), "localhost"); assertEquals(new URI(ld.getHttpUrl()).getHost(), "localhost"); - assertEquals(new URI(ld.getHttpUrlTls()).getHost(), "localhost"); // Produce data diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java index 560cfa9216a02..5fbda961c0e3d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java @@ -103,14 +103,14 @@ public void setup() throws Exception { setupConfigs(conf); super.internalSetup(conf); pulsar1 = pulsar; - primaryHost = String.format("%s:%d", "localhost", pulsar1.getListenPortHTTP().get()); + primaryHost = pulsar1.getBrokerId(); admin1 = admin; var config2 = getDefaultConf(); setupConfigs(config2); additionalPulsarTestContext = createAdditionalPulsarTestContext(config2); pulsar2 = 
additionalPulsarTestContext.getPulsarService(); - secondaryHost = String.format("%s:%d", "localhost", pulsar2.getListenPortHTTP().get()); + secondaryHost = pulsar2.getBrokerId(); primaryLoadManager = getField(pulsar1.getLoadManager().get(), "loadManager"); secondaryLoadManager = getField(pulsar2.getLoadManager().get(), "loadManager"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java index 62faa70bbcb76..ded4ee8e58d53 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java @@ -115,7 +115,8 @@ public void anErrorShouldBeThrowBeforeLeaderElected() throws PulsarServerExcepti checkLookupException(tenant, namespace, client); // broker, webService and leaderElectionService is started, and elect is done; - leaderBrokerReference.set(new LeaderBroker(pulsar.getWebServiceAddress())); + leaderBrokerReference.set( + new LeaderBroker(pulsar.getBrokerId(), pulsar.getSafeWebServiceAddress())); Producer producer = client.newProducer() .topic("persistent://" + tenant + "/" + namespace + "/1p") @@ -129,10 +130,10 @@ private void checkLookupException(String tenant, String namespace, PulsarClient .topic("persistent://" + tenant + "/" + namespace + "/1p") .create(); } catch (PulsarClientException t) { - Assert.assertTrue(t instanceof PulsarClientException.LookupException); + Assert.assertTrue(t instanceof PulsarClientException.BrokerMetadataException + || t instanceof PulsarClientException.LookupException); Assert.assertTrue( - t.getMessage().contains( - "java.lang.IllegalStateException: The leader election has not yet been completed!")); + t.getMessage().contains("The leader election has not yet been completed")); } } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtilsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtilsTest.java new file mode 100644 index 0000000000000..ac21b30bdde51 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LinuxInfoUtilsTest.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.loadbalance; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mockStatic; +import static org.testng.Assert.assertEquals; +import java.nio.file.Files; +import java.util.stream.Stream; +import lombok.extern.slf4j.Slf4j; +import org.mockito.MockedStatic; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class LinuxInfoUtilsTest { + + /** + * simulate reading first line of /proc/stat to get total cpu usage. 
+ */ + @Test + public void testGetCpuUsageForEntireHost(){ + try (MockedStatic filesMockedStatic = mockStatic(Files.class)) { + filesMockedStatic.when(() -> Files.lines(any())).thenReturn( + Stream.generate(() -> "cpu 317808 128 58637 2503692 7634 0 13472 0 0 0")); + long idle = 2503692 + 7634, total = 2901371; + LinuxInfoUtils.ResourceUsage resourceUsage = LinuxInfoUtils.ResourceUsage.builder() + .usage(total - idle) + .idle(idle) + .total(total).build(); + assertEquals(LinuxInfoUtils.getCpuUsageForEntireHost(), resourceUsage); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java index 68902c73e5717..04a2175b1d10f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadBalancerTest.java @@ -124,7 +124,6 @@ void setup() throws Exception { config.setAdvertisedAddress("localhost"); config.setWebServicePort(Optional.of(0)); config.setBrokerServicePortTls(Optional.of(0)); - config.setWebServicePortTls(Optional.of(0)); config.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config.setBrokerShutdownTimeoutMs(0L); config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -139,7 +138,7 @@ void setup() throws Exception { brokerNativeBrokerPorts[i] = pulsarServices[i].getBrokerListenPort().get(); brokerUrls[i] = new URL("http://127.0.0.1" + ":" + brokerWebServicePorts[i]); - lookupAddresses[i] = pulsarServices[i].getAdvertisedAddress() + ":" + pulsarServices[i].getListenPortHTTP().get(); + lookupAddresses[i] = pulsarServices[i].getBrokerId(); pulsarAdmins[i] = PulsarAdmin.builder().serviceHttpUrl(brokerUrls[i].toString()).build(); } @@ -410,7 +409,7 @@ public void testTopicAssignmentWithExistingBundles() throws Exception { double expectedMaxVariation = 10.0; for (int i = 0; i < 
BROKER_COUNT; i++) { long actualValue = 0; - String resourceId = "http://" + lookupAddresses[i]; + String resourceId = lookupAddresses[i]; if (namespaceOwner.containsKey(resourceId)) { actualValue = namespaceOwner.get(resourceId); } @@ -696,7 +695,7 @@ public void testLeaderElection() throws Exception { } } // Make sure all brokers see the same leader - log.info("Old leader is : {}", oldLeader.getServiceUrl()); + log.info("Old leader is : {}", oldLeader.getBrokerId()); for (PulsarService pulsar : activePulsar) { log.info("Current leader for {} is : {}", pulsar.getWebServiceAddress(), pulsar.getLeaderElectionService().getCurrentLeader()); assertEquals(pulsar.getLeaderElectionService().readCurrentLeader().join(), Optional.of(oldLeader)); @@ -706,7 +705,7 @@ public void testLeaderElection() throws Exception { leaderPulsar.close(); loopUntilLeaderChangesForAllBroker(followerPulsar, oldLeader); LeaderBroker newLeader = followerPulsar.get(0).getLeaderElectionService().readCurrentLeader().join().get(); - log.info("New leader is : {}", newLeader.getServiceUrl()); + log.info("New leader is : {}", newLeader.getBrokerId()); Assert.assertNotEquals(newLeader, oldLeader); } } @@ -744,7 +743,7 @@ private void createNamespacePolicies(PulsarService pulsar) throws Exception { // set up policy that use this broker as secondary policyData = NamespaceIsolationData.builder() .namespaces(Collections.singletonList("pulsar/use/secondary-ns.*")) - .primary(Collections.singletonList(pulsarServices[0].getWebServiceAddress())) + .primary(Collections.singletonList(pulsarServices[0].getAdvertisedAddress())) .secondary(allExceptFirstBroker) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) @@ -756,7 +755,7 @@ private void createNamespacePolicies(PulsarService pulsar) throws Exception { // set up policy that do not use this broker (neither primary nor secondary) policyData = NamespaceIsolationData.builder() 
.namespaces(Collections.singletonList("pulsar/use/shared-ns.*")) - .primary(Collections.singletonList(pulsarServices[0].getWebServiceAddress())) + .primary(Collections.singletonList(pulsarServices[0].getAdvertisedAddress())) .secondary(allExceptFirstBroker) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java index 5c840129dd8d5..103983506a6dd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java @@ -20,6 +20,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -36,14 +37,24 @@ import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.MultiBrokerTestZKBaseTest; import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.client.admin.Lookup; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.impl.LookupService; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.common.naming.TopicName; import org.awaitility.Awaitility; import org.testng.annotations.Test; @Slf4j @Test(groups = "broker") public class MultiBrokerLeaderElectionTest extends MultiBrokerTestZKBaseTest { + public MultiBrokerLeaderElectionTest() { + super(); + this.isTcpLookup = true; + } + @Override protected int numberOfAdditionalBrokers() { return 9; @@ -88,39 +99,80 @@ public void shouldAllBrokersBeAbleToGetTheLeader() { }); } - @Test 
- public void shouldProvideConsistentAnswerToTopicLookups() + @Test(timeOut = 60000L) + public void shouldProvideConsistentAnswerToTopicLookupsUsingAdminApi() throws PulsarAdminException, ExecutionException, InterruptedException { - String topicNameBase = "persistent://public/default/lookuptest" + UUID.randomUUID() + "-"; + String namespace = "public/ns" + UUID.randomUUID(); + admin.namespaces().createNamespace(namespace, 256); + String topicNameBase = "persistent://" + namespace + "/lookuptest-"; List topicNames = IntStream.range(0, 500).mapToObj(i -> topicNameBase + i) .collect(Collectors.toList()); List allAdmins = getAllAdmins(); @Cleanup("shutdown") ExecutorService executorService = Executors.newFixedThreadPool(allAdmins.size()); List>> resultFutures = new ArrayList<>(); - String leaderBrokerUrl = admin.brokers().getLeaderBroker().getServiceUrl(); - log.info("LEADER is {}", leaderBrokerUrl); // use Phaser to increase the chances of a race condition by triggering all threads once - // they are waiting just before the lookupTopic call + // they are waiting just before each lookupTopic call final Phaser phaser = new Phaser(1); for (PulsarAdmin brokerAdmin : allAdmins) { - if (!leaderBrokerUrl.equals(brokerAdmin.getServiceUrl())) { - phaser.register(); - log.info("Doing lookup to broker {}", brokerAdmin.getServiceUrl()); - resultFutures.add(executorService.submit(() -> { - phaser.arriveAndAwaitAdvance(); - return topicNames.stream().map(topicName -> { - try { - return brokerAdmin.lookups().lookupTopic(topicName); - } catch (PulsarAdminException e) { - log.error("Error looking up topic {} in {}", topicName, brokerAdmin.getServiceUrl()); - throw new RuntimeException(e); - } - }).collect(Collectors.toList()); - })); + phaser.register(); + Lookup lookups = brokerAdmin.lookups(); + log.info("Doing lookup to broker {}", brokerAdmin.getServiceUrl()); + resultFutures.add(executorService.submit(() -> topicNames.stream().map(topicName -> { + phaser.arriveAndAwaitAdvance(); 
+ try { + return lookups.lookupTopic(topicName); + } catch (PulsarAdminException e) { + log.error("Error looking up topic {} in {}", topicName, brokerAdmin.getServiceUrl()); + throw new RuntimeException(e); + } + }).collect(Collectors.toList()))); + } + phaser.arriveAndDeregister(); + List firstResult = null; + for (Future> resultFuture : resultFutures) { + List result = resultFuture.get(); + if (firstResult == null) { + firstResult = result; + } else { + assertEquals(result, firstResult, "The lookup results weren't consistent."); } } - phaser.arriveAndAwaitAdvance(); + } + + @Test(timeOut = 60000L) + public void shouldProvideConsistentAnswerToTopicLookupsUsingClient() + throws PulsarAdminException, ExecutionException, InterruptedException { + String namespace = "public/ns" + UUID.randomUUID(); + admin.namespaces().createNamespace(namespace, 256); + String topicNameBase = "persistent://" + namespace + "/lookuptest-"; + List topicNames = IntStream.range(0, 500).mapToObj(i -> topicNameBase + i) + .collect(Collectors.toList()); + List allClients = getAllClients(); + @Cleanup("shutdown") + ExecutorService executorService = Executors.newFixedThreadPool(allClients.size()); + List>> resultFutures = new ArrayList<>(); + // use Phaser to increase the chances of a race condition by triggering all threads once + // they are waiting just before each lookupTopic call + final Phaser phaser = new Phaser(1); + for (PulsarClient brokerClient : allClients) { + phaser.register(); + String serviceUrl = ((PulsarClientImpl) brokerClient).getConfiguration().getServiceUrl(); + LookupService lookupService = ((PulsarClientImpl) brokerClient).getLookup(); + log.info("Doing lookup to broker {}", serviceUrl); + resultFutures.add(executorService.submit(() -> topicNames.stream().map(topicName -> { + phaser.arriveAndAwaitAdvance(); + try { + InetSocketAddress logicalAddress = + lookupService.getBroker(TopicName.get(topicName)).get().getLeft(); + return logicalAddress.getHostString() + ":" + 
logicalAddress.getPort(); + } catch (InterruptedException | ExecutionException e) { + log.error("Error looking up topic {} in {}", topicName, serviceUrl); + throw new RuntimeException(e); + } + }).collect(Collectors.toList()))); + } + phaser.arriveAndDeregister(); List firstResult = null; for (Future> resultFuture : resultFutures) { List result = resultFuture.get(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java index c4898786e3e03..f92faa3e4bd54 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java @@ -55,12 +55,16 @@ import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceUnit; import org.apache.pulsar.client.admin.BrokerStats; import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.policies.data.AutoFailoverPolicyData; import org.apache.pulsar.common.policies.data.AutoFailoverPolicyType; +import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.NamespaceIsolationData; import org.apache.pulsar.common.policies.data.ResourceQuota; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.policies.impl.NamespaceIsolationPolicies; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.metadata.api.MetadataStoreException.BadVersionException; @@ -71,6 +75,7 @@ import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import 
org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -92,8 +97,14 @@ public class SimpleLoadManagerImplTest { BrokerStats brokerStatsClient2; String primaryHost; + + String primaryTlsHost; + String secondaryHost; + private String defaultNamespace; + private String defaultTenant; + ExecutorService executor = new ThreadPoolExecutor(5, 20, 30, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); @BeforeMethod @@ -107,13 +118,13 @@ void setup() throws Exception { ServiceConfiguration config1 = new ServiceConfiguration(); config1.setClusterName("use"); config1.setWebServicePort(Optional.of(0)); + config1.setWebServicePortTls(Optional.of(0)); config1.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config1.setBrokerShutdownTimeoutMs(0L); config1.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config1.setBrokerServicePort(Optional.of(0)); config1.setLoadManagerClassName(SimpleLoadManagerImpl.class.getName()); config1.setBrokerServicePortTls(Optional.of(0)); - config1.setWebServicePortTls(Optional.of(0)); config1.setAdvertisedAddress("localhost"); pulsar1 = new PulsarService(config1); pulsar1.start(); @@ -122,11 +133,13 @@ void setup() throws Exception { admin1 = PulsarAdmin.builder().serviceHttpUrl(url1.toString()).build(); brokerStatsClient1 = admin1.brokerStats(); primaryHost = pulsar1.getWebServiceAddress(); + primaryTlsHost = pulsar1.getWebServiceAddressTls(); // Start broker 2 ServiceConfiguration config2 = new ServiceConfiguration(); config2.setClusterName("use"); config2.setWebServicePort(Optional.of(0)); + config2.setWebServicePortTls(Optional.of(0)); config2.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config2.setBrokerShutdownTimeoutMs(0L); 
config2.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -143,6 +156,8 @@ void setup() throws Exception { brokerStatsClient2 = admin2.brokerStats(); secondaryHost = pulsar2.getWebServiceAddress(); Thread.sleep(100); + + setupClusters(); } @AfterMethod(alwaysRun = true) @@ -196,7 +211,7 @@ public void testBasicBrokerSelection() throws Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit("http://prod2-broker7.messaging.usw.example.com:8080", rd); + ResourceUnit ru1 = new SimpleResourceUnit("prod2-broker7.messaging.usw.example.com:8080", rd); Set rus = new HashSet<>(); rus.add(ru1); LoadRanker lr = new ResourceAvailabilityRanker(); @@ -231,15 +246,15 @@ public void testPrimary() throws Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit( - "http://" + pulsar1.getAdvertisedAddress() + ":" + pulsar1.getConfiguration().getWebServicePort().get(), rd); + ResourceUnit ru1 = new SimpleResourceUnit(pulsar1.getBrokerId(), rd); Set rus = new HashSet<>(); rus.add(ru1); LoadRanker lr = new ResourceAvailabilityRanker(); // inject the load report and rankings Map loadReports = new HashMap<>(); - org.apache.pulsar.policies.data.loadbalancer.LoadReport loadReport = new org.apache.pulsar.policies.data.loadbalancer.LoadReport(); + org.apache.pulsar.policies.data.loadbalancer.LoadReport loadReport = + new org.apache.pulsar.policies.data.loadbalancer.LoadReport(); loadReport.setSystemResourceUsage(new SystemResourceUsage()); loadReports.put(ru1, loadReport); setObjectField(SimpleLoadManagerImpl.class, loadManager, "currentLoadReports", loadReports); @@ -254,11 +269,9 @@ public void testPrimary() throws Exception { sortedRankingsInstance.get().put(lr.getRank(rd), rus); 
setObjectField(SimpleLoadManagerImpl.class, loadManager, "sortedRankings", sortedRankingsInstance); - ResourceUnit found = loadManager - .getLeastLoaded(NamespaceName.get("pulsar/use/primary-ns.10")).get(); - // broker is not active so found should be null + ResourceUnit found = loadManager.getLeastLoaded(NamespaceName.get("pulsar/use/primary-ns.10")).get(); + // TODO: this test doesn't make sense. This was the original assertion. assertNotEquals(found, null, "did not find a broker when expected one to be found"); - } @Test(enabled = false) @@ -272,7 +285,7 @@ public void testPrimarySecondary() throws Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit("http://prod2-broker7.messaging.usw.example.com:8080", rd); + ResourceUnit ru1 = new SimpleResourceUnit("prod2-broker7.messaging.usw.example.com:8080", rd); Set rus = new HashSet<>(); rus.add(ru1); LoadRanker lr = new ResourceAvailabilityRanker(); @@ -341,8 +354,8 @@ public void testDoLoadShedding() throws Exception { rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024)); rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024)); - ResourceUnit ru1 = new SimpleResourceUnit("http://pulsar-broker1.com:8080", rd); - ResourceUnit ru2 = new SimpleResourceUnit("http://pulsar-broker2.com:8080", rd); + ResourceUnit ru1 = new SimpleResourceUnit("pulsar-broker1.com:8080", rd); + ResourceUnit ru2 = new SimpleResourceUnit("pulsar-broker2.com:8080", rd); Set rus = new HashSet<>(); rus.add(ru1); rus.add(ru2); @@ -395,14 +408,14 @@ public void testEvenBundleDistribution() throws Exception { final SimpleLoadManagerImpl loadManager = (SimpleLoadManagerImpl) pulsar1.getLoadManager().get(); for (final NamespaceBundle bundle : bundles) { - if (loadManager.getLeastLoaded(bundle).get().getResourceId().equals(primaryHost)) { + if 
(loadManager.getLeastLoaded(bundle).get().getResourceId().equals(pulsar1.getBrokerId())) { ++numAssignedToPrimary; } else { ++numAssignedToSecondary; } // Check that number of assigned bundles are equivalent when an even number have been assigned. if ((numAssignedToPrimary + numAssignedToSecondary) % 2 == 0) { - assert (numAssignedToPrimary == numAssignedToSecondary); + assertEquals(numAssignedToPrimary, numAssignedToSecondary); } } } @@ -475,4 +488,34 @@ public void testUsage() { assertEquals(usage.getBandwidthIn().usage, usageLimit); } + @Test + public void testGetWebSerUrl() throws PulsarAdminException { + String webServiceUrl = admin1.brokerStats().getLoadReport().getWebServiceUrl(); + Assert.assertEquals(webServiceUrl, pulsar1.getWebServiceAddress()); + + String webServiceUrl2 = admin2.brokerStats().getLoadReport().getWebServiceUrl(); + Assert.assertEquals(webServiceUrl2, pulsar2.getWebServiceAddress()); + } + + @Test + public void testRedirectOwner() throws PulsarAdminException { + final String topicName = "persistent://" + defaultNamespace + "/" + "test-topic"; + admin1.topics().createNonPartitionedTopic(topicName); + TopicStats stats = admin1.topics().getStats(topicName); + Assert.assertNotNull(stats); + + TopicStats stats2 = admin2.topics().getStats(topicName); + Assert.assertNotNull(stats2); + } + + private void setupClusters() throws PulsarAdminException { + admin1.clusters().createCluster("use", ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()) + .brokerServiceUrl(pulsar1.getBrokerServiceUrl()).build()); + TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("use")); + defaultTenant = "prop-xyz"; + admin1.tenants().createTenant(defaultTenant, tenantInfo); + defaultNamespace = defaultTenant + "/ns1"; + admin1.namespaces().createNamespace(defaultNamespace, Set.of("use")); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java index fca41837b9df4..fdd1eb7272c30 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/BrokerRegistryTest.java @@ -75,7 +75,7 @@ public class BrokerRegistryTest { private LocalBookkeeperEnsemble bkEnsemble; - // Make sure the load manager don't register itself to `/loadbalance/brokers/{lookupServiceAddress}` + // Make sure the load manager don't register itself to `/loadbalance/brokers/{brokerId}`. public static class MockLoadManager implements LoadManager { @Override @@ -291,7 +291,7 @@ public void testRegisterFailWithSameBrokerId() throws Exception { pulsar1.start(); pulsar2.start(); - doReturn(pulsar1.getLookupServiceAddress()).when(pulsar2).getLookupServiceAddress(); + doReturn(pulsar1.getBrokerId()).when(pulsar2).getBrokerId(); BrokerRegistryImpl brokerRegistry1 = createBrokerRegistryImpl(pulsar1); BrokerRegistryImpl brokerRegistry2 = createBrokerRegistryImpl(pulsar2); brokerRegistry1.start(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java index cbb3e174a0d87..850bf9a96c8f1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/ExtensibleLoadManagerImplTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.broker.loadbalance.extensions; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState.Releasing; +import static org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelTest.overrideTableView; import static 
org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.Admin; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.Bandwidth; import static org.apache.pulsar.broker.loadbalance.extensions.models.SplitDecision.Reason.MsgRate; @@ -35,9 +37,14 @@ import static org.apache.pulsar.broker.loadbalance.extensions.models.UnloadDecision.Reason.Overloaded; import static org.apache.pulsar.broker.loadbalance.extensions.models.UnloadDecision.Reason.Underloaded; import static org.apache.pulsar.broker.loadbalance.extensions.models.UnloadDecision.Reason.Unknown; +import static org.apache.pulsar.broker.namespace.NamespaceService.getHeartbeatNamespace; +import static org.apache.pulsar.broker.namespace.NamespaceService.getHeartbeatNamespaceV2; +import static org.apache.pulsar.broker.namespace.NamespaceService.getSLAMonitorNamespace; +import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -65,6 +72,7 @@ import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; @@ -73,6 +81,7 @@ import org.apache.pulsar.broker.loadbalance.LeaderElectionService; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitState; import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; +import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateData; import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLoadData; 
import org.apache.pulsar.broker.loadbalance.extensions.data.BrokerLookupData; import org.apache.pulsar.broker.loadbalance.extensions.data.TopBundlesLoadData; @@ -91,12 +100,14 @@ import org.apache.pulsar.broker.namespace.NamespaceBundleSplitListener; import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.impl.TableViewImpl; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.ServiceUnitId; +import org.apache.pulsar.common.naming.SystemTopicNames; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.naming.TopicVersion; import org.apache.pulsar.common.policies.data.BrokerAssignment; @@ -110,6 +121,8 @@ import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; import org.awaitility.Awaitility; +import org.mockito.MockedStatic; +import org.testng.AssertJUnit; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; @@ -136,9 +149,7 @@ public class ExtensibleLoadManagerImplTest extends MockedPulsarServiceBaseTest { private final String defaultTestNamespace = "public/test"; - @BeforeClass - @Override - public void setup() throws Exception { + private static void initConfig(ServiceConfiguration conf){ conf.setForceDeleteNamespaceAllowed(true); conf.setAllowAutoTopicCreationType(TopicType.NON_PARTITIONED); conf.setAllowAutoTopicCreation(true); @@ -146,16 +157,19 @@ public void setup() throws Exception { conf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); 
conf.setLoadBalancerSheddingEnabled(false); conf.setLoadBalancerDebugModeEnabled(true); - conf.setTopicLevelPoliciesEnabled(false); + conf.setTopicLevelPoliciesEnabled(true); + } + + @BeforeClass + @Override + public void setup() throws Exception { + // Set the inflight state waiting time and ownership monitor delay time to 5 seconds to avoid + // stuck when doing unload. + initConfig(conf); super.internalSetup(conf); pulsar1 = pulsar; ServiceConfiguration defaultConf = getDefaultConf(); - defaultConf.setAllowAutoTopicCreation(true); - defaultConf.setForceDeleteNamespaceAllowed(true); - defaultConf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); - defaultConf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); - defaultConf.setLoadBalancerSheddingEnabled(false); - defaultConf.setTopicLevelPoliciesEnabled(false); + initConfig(defaultConf); additionalPulsarTestContext = createAdditionalPulsarTestContext(defaultConf); pulsar2 = additionalPulsarTestContext.getPulsarService(); @@ -178,12 +192,10 @@ public void setup() throws Exception { } @Override - @AfterClass + @AfterClass(alwaysRun = true) protected void cleanup() throws Exception { - pulsar1 = null; - pulsar2.close(); - super.internalCleanup(); this.additionalPulsarTestContext.close(); + super.internalCleanup(); } @BeforeMethod(alwaysRun = true) @@ -221,9 +233,6 @@ public void testAssign() throws Exception { Optional brokerLookupData1 = secondaryLoadManager.assign(Optional.empty(), bundle).get(); assertEquals(brokerLookupData, brokerLookupData1); - verify(primaryLoadManager, times(1)).getBrokerSelectionStrategy(); - verify(secondaryLoadManager, times(0)).getBrokerSelectionStrategy(); - Optional lookupResult = pulsar2.getNamespaceService() .getBrokerServiceUrlAsync(topicName, null).get(); assertTrue(lookupResult.isPresent()); @@ -271,7 +280,7 @@ public String name() { public CompletableFuture> filterAsync(Map brokers, ServiceUnitId serviceUnit, LoadManagerContext context) { - 
brokers.remove(pulsar1.getLookupServiceAddress()); + brokers.remove(pulsar1.getBrokerId()); return CompletableFuture.completedFuture(brokers); } @@ -355,10 +364,10 @@ public boolean test(NamespaceBundle namespaceBundle) { }); - String dstBrokerUrl = pulsar1.getLookupServiceAddress(); + String dstBrokerUrl = pulsar1.getBrokerId(); String dstBrokerServiceUrl; if (broker.equals(pulsar1.getBrokerServiceUrl())) { - dstBrokerUrl = pulsar2.getLookupServiceAddress(); + dstBrokerUrl = pulsar2.getBrokerId(); dstBrokerServiceUrl = pulsar2.getBrokerServiceUrl(); } else { dstBrokerServiceUrl = pulsar1.getBrokerServiceUrl(); @@ -465,7 +474,8 @@ public void testSplitBundleWithSpecificPositionAdminAPI() throws Exception { "specified_positions_divide", List.of(bundleRanges.get(0), bundleRanges.get(1), splitPosition)); BundlesData bundlesData = admin.namespaces().getBundles(namespace); - assertEquals(bundlesData.getNumBundles(), numBundles + 1); + Awaitility.waitAtMost(15, TimeUnit.SECONDS) + .untilAsserted(() -> assertEquals(bundlesData.getNumBundles(), numBundles + 1)); String lowBundle = String.format("0x%08x", bundleRanges.get(0)); String midBundle = String.format("0x%08x", splitPosition); String highBundle = String.format("0x%08x", bundleRanges.get(1)); @@ -478,15 +488,26 @@ public void testDeleteNamespaceBundle() throws Exception { final String namespace = "public/testDeleteNamespaceBundle"; admin.namespaces().createNamespace(namespace, 3); TopicName topicName = TopicName.get(namespace + "/test-delete-namespace-bundle"); - NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); - String broker = admin.lookups().lookupTopic(topicName.toString()); - log.info("Assign the bundle {} to {}", bundle, broker); - - checkOwnershipState(broker, bundle); - admin.namespaces().deleteNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange()); - assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + Awaitility.await() + .atMost(15, 
TimeUnit.SECONDS) + .ignoreExceptions() + .untilAsserted(() -> { + NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); + String broker = admin.lookups().lookupTopic(topicName.toString()); + log.info("Assign the bundle {} to {}", bundle, broker); + checkOwnershipState(broker, bundle); + admin.namespaces().deleteNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange(), true); + // this could fail if the system topic lookup asynchronously happens before this. + // we will retry if it fails. + assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()); + }); + + Awaitility.await() + .atMost(15, TimeUnit.SECONDS) + .ignoreExceptions() + .untilAsserted(() -> admin.namespaces().deleteNamespace(namespace, true)); } @Test(timeOut = 30 * 1000) @@ -529,13 +550,13 @@ public void testMoreThenOneFilter() throws Exception { TopicName topicName = TopicName.get(defaultTestNamespace + "/test-filter-has-exception"); NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); - String lookupServiceAddress1 = pulsar1.getLookupServiceAddress(); + String brokerId1 = pulsar1.getBrokerId(); doReturn(List.of(new MockBrokerFilter() { @Override public CompletableFuture> filterAsync(Map brokers, ServiceUnitId serviceUnit, LoadManagerContext context) { - brokers.remove(lookupServiceAddress1); + brokers.remove(brokerId1); return CompletableFuture.completedFuture(brokers); } },new MockBrokerFilter() { @@ -554,99 +575,164 @@ public CompletableFuture> filterAsync(Map webServiceUrl1 = - pulsar1.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl1.isPresent()); - assertEquals(webServiceUrl1.get().toString(), pulsar3.getWebServiceAddress()); - - Optional webServiceUrl2 = - pulsar2.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl2.isPresent()); - assertEquals(webServiceUrl2.get().toString(), webServiceUrl1.get().toString()); - - Optional webServiceUrl3 = - 
pulsar3.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl3.isPresent()); - assertEquals(webServiceUrl3.get().toString(), webServiceUrl1.get().toString()); - - // Test deploy new broker with new load manager - ServiceConfiguration conf = getDefaultConf(); - conf.setAllowAutoTopicCreation(true); - conf.setForceDeleteNamespaceAllowed(true); - conf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); - conf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); - try (var additionPulsarTestContext = createAdditionalPulsarTestContext(conf)) { - var pulsar4 = additionPulsarTestContext.getPulsarService(); - - Set availableCandidates = Sets.newHashSet(pulsar1.getBrokerServiceUrl(), - pulsar2.getBrokerServiceUrl(), - pulsar4.getBrokerServiceUrl()); - String lookupResult4 = pulsar4.getAdminClient().lookups().lookupTopic(topic); - assertTrue(availableCandidates.contains(lookupResult4)); - - String lookupResult5 = pulsar1.getAdminClient().lookups().lookupTopic(topic); - String lookupResult6 = pulsar2.getAdminClient().lookups().lookupTopic(topic); - String lookupResult7 = pulsar3.getAdminClient().lookups().lookupTopic(topic); - assertEquals(lookupResult4, lookupResult5); - assertEquals(lookupResult4, lookupResult6); - assertEquals(lookupResult4, lookupResult7); - - Set availableWebUrlCandidates = Sets.newHashSet(pulsar1.getWebServiceAddress(), - pulsar2.getWebServiceAddress(), - pulsar4.getWebServiceAddress()); - - webServiceUrl1 = + try (MockedStatic channelMockedStatic = + mockStatic(ServiceUnitStateChannelImpl.class)) { + channelMockedStatic.when(() -> ServiceUnitStateChannelImpl.newInstance(isA(PulsarService.class))) + .thenAnswer(invocation -> { + PulsarService pulsarService = invocation.getArgument(0); + // Set the inflight state waiting time and ownership monitor delay time to 5 seconds to avoid + // stuck when doing unload. 
+ return new ServiceUnitStateChannelImpl(pulsarService, 5 * 1000, 1); + }); + // Test rollback to modular load manager. + ServiceConfiguration defaultConf = getDefaultConf(); + defaultConf.setAllowAutoTopicCreation(true); + defaultConf.setForceDeleteNamespaceAllowed(true); + defaultConf.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); + defaultConf.setLoadBalancerSheddingEnabled(false); + try (var additionalPulsarTestContext = createAdditionalPulsarTestContext(defaultConf)) { + // start pulsar3 with old load manager + var pulsar3 = additionalPulsarTestContext.getPulsarService(); + String topic = "persistent://" + defaultTestNamespace + "/test"; + + String lookupResult1 = pulsar3.getAdminClient().lookups().lookupTopic(topic); + assertEquals(lookupResult1, pulsar3.getBrokerServiceUrl()); + + String lookupResult2 = pulsar1.getAdminClient().lookups().lookupTopic(topic); + String lookupResult3 = pulsar2.getAdminClient().lookups().lookupTopic(topic); + assertEquals(lookupResult1, lookupResult2); + assertEquals(lookupResult1, lookupResult3); + + NamespaceBundle bundle = getBundleAsync(pulsar1, TopicName.get(topic)).get(); + LookupOptions options = LookupOptions.builder() + .authoritative(false) + .requestHttps(false) + .readOnly(false) + .loadTopicsInBundle(false).build(); + Optional webServiceUrl1 = pulsar1.getNamespaceService().getWebServiceUrl(bundle, options); assertTrue(webServiceUrl1.isPresent()); - assertTrue(availableWebUrlCandidates.contains(webServiceUrl1.get().toString())); + assertEquals(webServiceUrl1.get().toString(), pulsar3.getWebServiceAddress()); - webServiceUrl2 = + Optional webServiceUrl2 = pulsar2.getNamespaceService().getWebServiceUrl(bundle, options); assertTrue(webServiceUrl2.isPresent()); assertEquals(webServiceUrl2.get().toString(), webServiceUrl1.get().toString()); - // The pulsar3 will redirect to pulsar4 - webServiceUrl3 = + Optional webServiceUrl3 = pulsar3.getNamespaceService().getWebServiceUrl(bundle, options); 
assertTrue(webServiceUrl3.isPresent()); - // It will redirect to pulsar4 - assertTrue(availableWebUrlCandidates.contains(webServiceUrl3.get().toString())); - - var webServiceUrl4 = - pulsar4.getNamespaceService().getWebServiceUrl(bundle, options); - assertTrue(webServiceUrl4.isPresent()); - assertEquals(webServiceUrl4.get().toString(), webServiceUrl1.get().toString()); + assertEquals(webServiceUrl3.get().toString(), webServiceUrl1.get().toString()); + + List pulsarServices = List.of(pulsar1, pulsar2, pulsar3); + for (PulsarService pulsarService : pulsarServices) { + // Test lookup heartbeat namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupHeartbeatOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + // Test lookup SLA namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupSLANamespaceOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + } + // Test deploy new broker with new load manager + ServiceConfiguration conf = getDefaultConf(); + conf.setAllowAutoTopicCreation(true); + conf.setForceDeleteNamespaceAllowed(true); + conf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName()); + conf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName()); + try (var additionPulsarTestContext = createAdditionalPulsarTestContext(conf)) { + var pulsar4 = additionPulsarTestContext.getPulsarService(); + + Set availableCandidates = Sets.newHashSet(pulsar1.getBrokerServiceUrl(), + pulsar2.getBrokerServiceUrl(), + pulsar4.getBrokerServiceUrl()); + String lookupResult4 = pulsar4.getAdminClient().lookups().lookupTopic(topic); + assertTrue(availableCandidates.contains(lookupResult4)); + + String lookupResult5 = pulsar1.getAdminClient().lookups().lookupTopic(topic); + String lookupResult6 = pulsar2.getAdminClient().lookups().lookupTopic(topic); + String lookupResult7 = pulsar3.getAdminClient().lookups().lookupTopic(topic); + assertEquals(lookupResult4, 
lookupResult5); + assertEquals(lookupResult4, lookupResult6); + assertEquals(lookupResult4, lookupResult7); + + Set availableWebUrlCandidates = Sets.newHashSet(pulsar1.getWebServiceAddress(), + pulsar2.getWebServiceAddress(), + pulsar4.getWebServiceAddress()); + + webServiceUrl1 = + pulsar1.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl1.isPresent()); + assertTrue(availableWebUrlCandidates.contains(webServiceUrl1.get().toString())); + + webServiceUrl2 = + pulsar2.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl2.isPresent()); + assertEquals(webServiceUrl2.get().toString(), webServiceUrl1.get().toString()); + + // The pulsar3 will redirect to pulsar4 + webServiceUrl3 = + pulsar3.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl3.isPresent()); + // It will redirect to pulsar4 + assertTrue(availableWebUrlCandidates.contains(webServiceUrl3.get().toString())); + + var webServiceUrl4 = + pulsar4.getNamespaceService().getWebServiceUrl(bundle, options); + assertTrue(webServiceUrl4.isPresent()); + assertEquals(webServiceUrl4.get().toString(), webServiceUrl1.get().toString()); + + pulsarServices = List.of(pulsar1, pulsar2, pulsar3, pulsar4); + for (PulsarService pulsarService : pulsarServices) { + // Test lookup heartbeat namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupHeartbeatOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + // Test lookup SLA namespace's topic + for (PulsarService pulsar : pulsarServices) { + assertLookupSLANamespaceOwner(pulsarService, + pulsar.getBrokerId(), pulsar.getBrokerServiceUrl()); + } + } + } } } + } - @Test + private void assertLookupHeartbeatOwner(PulsarService pulsar, + String brokerId, + String expectedBrokerServiceUrl) throws Exception { + NamespaceName heartbeatNamespaceV1 = + getHeartbeatNamespace(brokerId, pulsar.getConfiguration()); + + String heartbeatV1Topic = 
heartbeatNamespaceV1.getPersistentTopicName("test"); + assertEquals(pulsar.getAdminClient().lookups().lookupTopic(heartbeatV1Topic), expectedBrokerServiceUrl); + + NamespaceName heartbeatNamespaceV2 = + getHeartbeatNamespaceV2(brokerId, pulsar.getConfiguration()); + + String heartbeatV2Topic = heartbeatNamespaceV2.getPersistentTopicName("test"); + assertEquals(pulsar.getAdminClient().lookups().lookupTopic(heartbeatV2Topic), expectedBrokerServiceUrl); + } + + private void assertLookupSLANamespaceOwner(PulsarService pulsar, + String brokerId, + String expectedBrokerServiceUrl) throws Exception { + NamespaceName slaMonitorNamespace = getSLAMonitorNamespace(brokerId, pulsar.getConfiguration()); + String slaMonitorTopic = slaMonitorNamespace.getPersistentTopicName("test"); + String result = pulsar.getAdminClient().lookups().lookupTopic(slaMonitorTopic); + log.info("Topic {} Lookup result: {}", slaMonitorTopic, result); + assertNotNull(result); + assertEquals(result, expectedBrokerServiceUrl); + } + + @Test(priority = 10) public void testTopBundlesLoadDataStoreTableViewFromChannelOwner() throws Exception { var topBundlesLoadDataStorePrimary = (LoadDataStore) FieldUtils.readDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", true); @@ -714,7 +800,6 @@ public void testRoleChange() throws Exception { reset(); return null; }).when(topBundlesLoadDataStorePrimarySpy).closeTableView(); - FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStorePrimarySpy, true); var topBundlesLoadDataStoreSecondary = (LoadDataStore) FieldUtils.readDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", true); @@ -737,36 +822,65 @@ public void testRoleChange() throws Exception { reset(); return null; }).when(topBundlesLoadDataStoreSecondarySpy).closeTableView(); - FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStoreSecondarySpy, true); - if (channel1.isChannelOwnerAsync().get(5, 
TimeUnit.SECONDS)) { - primaryLoadManager.playFollower(); + try { + FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", + topBundlesLoadDataStorePrimarySpy, true); + FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", + topBundlesLoadDataStoreSecondarySpy, true); + + + if (channel1.isChannelOwnerAsync().get(5, TimeUnit.SECONDS)) { + primaryLoadManager.playLeader(); + secondaryLoadManager.playFollower(); + verify(topBundlesLoadDataStorePrimarySpy, times(3)).startTableView(); + verify(topBundlesLoadDataStorePrimarySpy, times(5)).closeTableView(); + verify(topBundlesLoadDataStoreSecondarySpy, times(0)).startTableView(); + verify(topBundlesLoadDataStoreSecondarySpy, times(3)).closeTableView(); + } else { + primaryLoadManager.playFollower(); + secondaryLoadManager.playLeader(); + verify(topBundlesLoadDataStoreSecondarySpy, times(3)).startTableView(); + verify(topBundlesLoadDataStoreSecondarySpy, times(5)).closeTableView(); + verify(topBundlesLoadDataStorePrimarySpy, times(0)).startTableView(); + verify(topBundlesLoadDataStorePrimarySpy, times(3)).closeTableView(); + } + primaryLoadManager.playFollower(); - secondaryLoadManager.playLeader(); - secondaryLoadManager.playLeader(); - primaryLoadManager.playLeader(); - primaryLoadManager.playLeader(); secondaryLoadManager.playFollower(); - secondaryLoadManager.playFollower(); - } else { - primaryLoadManager.playLeader(); - primaryLoadManager.playLeader(); - secondaryLoadManager.playFollower(); - secondaryLoadManager.playFollower(); - primaryLoadManager.playFollower(); - primaryLoadManager.playFollower(); - secondaryLoadManager.playLeader(); - secondaryLoadManager.playLeader(); - } + if (channel1.isChannelOwnerAsync().get(5, TimeUnit.SECONDS)) { + assertEquals(ExtensibleLoadManagerImpl.Role.Leader, + FieldUtils.readDeclaredField(primaryLoadManager, "role", true)); + assertEquals(ExtensibleLoadManagerImpl.Role.Follower, + FieldUtils.readDeclaredField(secondaryLoadManager, 
"role", true)); + } else { + assertEquals(ExtensibleLoadManagerImpl.Role.Follower, + FieldUtils.readDeclaredField(primaryLoadManager, "role", true)); + assertEquals(ExtensibleLoadManagerImpl.Role.Leader, + FieldUtils.readDeclaredField(secondaryLoadManager, "role", true)); + } - verify(topBundlesLoadDataStorePrimarySpy, times(3)).startTableView(); - verify(topBundlesLoadDataStorePrimarySpy, times(3)).closeTableView(); - verify(topBundlesLoadDataStoreSecondarySpy, times(3)).startTableView(); - verify(topBundlesLoadDataStoreSecondarySpy, times(3)).closeTableView(); + primaryLoadManager.playLeader(); + secondaryLoadManager.playLeader(); - FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStorePrimary, true); - FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", topBundlesLoadDataStoreSecondary, true); + if (channel1.isChannelOwnerAsync().get(5, TimeUnit.SECONDS)) { + assertEquals(ExtensibleLoadManagerImpl.Role.Leader, + FieldUtils.readDeclaredField(primaryLoadManager, "role", true)); + assertEquals(ExtensibleLoadManagerImpl.Role.Follower, + FieldUtils.readDeclaredField(secondaryLoadManager, "role", true)); + } else { + assertEquals(ExtensibleLoadManagerImpl.Role.Follower, + FieldUtils.readDeclaredField(primaryLoadManager, "role", true)); + assertEquals(ExtensibleLoadManagerImpl.Role.Leader, + FieldUtils.readDeclaredField(secondaryLoadManager, "role", true)); + } + } finally { + FieldUtils.writeDeclaredField(primaryLoadManager, "topBundlesLoadDataStore", + topBundlesLoadDataStorePrimary, true); + FieldUtils.writeDeclaredField(secondaryLoadManager, "topBundlesLoadDataStore", + topBundlesLoadDataStoreSecondary, true); + } } @Test @@ -979,7 +1093,7 @@ public void testDisableBroker() throws Exception { NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); if (!pulsar3.getBrokerServiceUrl().equals(lookupResult1)) { admin.namespaces().unloadNamespaceBundle(topicName.getNamespace(), 
bundle.getBundleRange(), - pulsar3.getLookupServiceAddress()); + pulsar3.getBrokerId()); lookupResult1 = pulsar2.getAdminClient().lookups().lookupTopic(topic); } String lookupResult2 = pulsar1.getAdminClient().lookups().lookupTopic(topic); @@ -1043,17 +1157,23 @@ public void testListTopic() throws Exception { admin.namespaces().deleteNamespace(namespace, true); } - @Test(timeOut = 30 * 1000) + @Test(timeOut = 30 * 1000, priority = -1) public void testGetOwnedServiceUnitsAndGetOwnedNamespaceStatus() throws Exception { NamespaceName heartbeatNamespacePulsar1V1 = - NamespaceService.getHeartbeatNamespace(pulsar1.getAdvertisedAddress(), pulsar1.getConfiguration()); + getHeartbeatNamespace(pulsar1.getBrokerId(), pulsar1.getConfiguration()); NamespaceName heartbeatNamespacePulsar1V2 = - NamespaceService.getHeartbeatNamespaceV2(pulsar1.getAdvertisedAddress(), pulsar1.getConfiguration()); + NamespaceService.getHeartbeatNamespaceV2(pulsar1.getBrokerId(), pulsar1.getConfiguration()); NamespaceName heartbeatNamespacePulsar2V1 = - NamespaceService.getHeartbeatNamespace(pulsar2.getAdvertisedAddress(), pulsar2.getConfiguration()); + getHeartbeatNamespace(pulsar2.getBrokerId(), pulsar2.getConfiguration()); NamespaceName heartbeatNamespacePulsar2V2 = - NamespaceService.getHeartbeatNamespaceV2(pulsar2.getAdvertisedAddress(), pulsar2.getConfiguration()); + NamespaceService.getHeartbeatNamespaceV2(pulsar2.getBrokerId(), pulsar2.getConfiguration()); + + NamespaceName slaMonitorNamespacePulsar1 = + getSLAMonitorNamespace(pulsar1.getBrokerId(), pulsar1.getConfiguration()); + + NamespaceName slaMonitorNamespacePulsar2 = + getSLAMonitorNamespace(pulsar2.getBrokerId(), pulsar2.getConfiguration()); NamespaceBundle bundle1 = pulsar1.getNamespaceService().getNamespaceBundleFactory() .getFullBundle(heartbeatNamespacePulsar1V1); @@ -1065,27 +1185,34 @@ public void testGetOwnedServiceUnitsAndGetOwnedNamespaceStatus() throws Exceptio NamespaceBundle bundle4 = 
pulsar2.getNamespaceService().getNamespaceBundleFactory() .getFullBundle(heartbeatNamespacePulsar2V2); + NamespaceBundle slaBundle1 = pulsar1.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(slaMonitorNamespacePulsar1); + NamespaceBundle slaBundle2 = pulsar2.getNamespaceService().getNamespaceBundleFactory() + .getFullBundle(slaMonitorNamespacePulsar2); + + Set ownedServiceUnitsByPulsar1 = primaryLoadManager.getOwnedServiceUnits(); log.info("Owned service units: {}", ownedServiceUnitsByPulsar1); // heartbeat namespace bundle will own by pulsar1 - assertEquals(ownedServiceUnitsByPulsar1.size(), 2); assertTrue(ownedServiceUnitsByPulsar1.contains(bundle1)); assertTrue(ownedServiceUnitsByPulsar1.contains(bundle2)); + assertTrue(ownedServiceUnitsByPulsar1.contains(slaBundle1)); Set ownedServiceUnitsByPulsar2 = secondaryLoadManager.getOwnedServiceUnits(); log.info("Owned service units: {}", ownedServiceUnitsByPulsar2); - assertEquals(ownedServiceUnitsByPulsar2.size(), 2); assertTrue(ownedServiceUnitsByPulsar2.contains(bundle3)); assertTrue(ownedServiceUnitsByPulsar2.contains(bundle4)); + assertTrue(ownedServiceUnitsByPulsar2.contains(slaBundle2)); Map ownedNamespacesByPulsar1 = - admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar1.getLookupServiceAddress()); + admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar1.getBrokerId()); Map ownedNamespacesByPulsar2 = - admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar2.getLookupServiceAddress()); - assertEquals(ownedNamespacesByPulsar1.size(), 2); + admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar2.getBrokerId()); assertTrue(ownedNamespacesByPulsar1.containsKey(bundle1.toString())); assertTrue(ownedNamespacesByPulsar1.containsKey(bundle2.toString())); - assertEquals(ownedNamespacesByPulsar2.size(), 2); + assertTrue(ownedNamespacesByPulsar1.containsKey(slaBundle1.toString())); + assertTrue(ownedNamespacesByPulsar2.containsKey(bundle3.toString())); 
assertTrue(ownedNamespacesByPulsar2.containsKey(bundle4.toString())); + assertTrue(ownedNamespacesByPulsar2.containsKey(slaBundle2.toString())); String topic = "persistent://" + defaultTestNamespace + "/test-get-owned-service-units"; admin.topics().createPartitionedTopic(topic, 1); @@ -1110,7 +1237,7 @@ private void assertOwnedServiceUnits( assertTrue(ownedBundles.contains(bundle)); }); Map ownedNamespaces = - admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar.getLookupServiceAddress()); + admin.brokers().getOwnedNamespaces(conf.getClusterName(), pulsar.getBrokerId()); assertTrue(ownedNamespaces.containsKey(bundle.toString())); NamespaceOwnershipStatus status = ownedNamespaces.get(bundle.toString()); assertTrue(status.is_active); @@ -1134,7 +1261,8 @@ public void testTryAcquiringOwnership() String topic = "persistent://" + namespace + "/test"; NamespaceBundle bundle = getBundleAsync(pulsar1, TopicName.get(topic)).get(); NamespaceEphemeralData namespaceEphemeralData = primaryLoadManager.tryAcquiringOwnership(bundle).get(); - assertEquals(namespaceEphemeralData.getNativeUrl(), pulsar1.getBrokerServiceUrl()); + assertTrue(Set.of(pulsar1.getBrokerServiceUrl(), pulsar2.getBrokerServiceUrl()) + .contains(namespaceEphemeralData.getNativeUrl())); admin.namespaces().deleteNamespace(namespace, true); } @@ -1143,6 +1271,47 @@ public void testHealthcheck() throws PulsarAdminException { admin.brokers().healthcheck(TopicVersion.V2); } + @Test(timeOut = 30 * 1000) + public void compactionScheduleTest() { + Awaitility.await() + .pollInterval(200, TimeUnit.MILLISECONDS) + .atMost(30, TimeUnit.SECONDS) + .ignoreExceptions() + .untilAsserted(() -> { // wait until true + primaryLoadManager.monitor(); + secondaryLoadManager.monitor(); + var threshold = admin.topicPolicies() + .getCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, false); + AssertJUnit.assertEquals(5 * 1024 * 1024, threshold == null ? 
0 : threshold.longValue()); + }); + } + + @Test(timeOut = 10 * 1000) + public void unloadTimeoutCheckTest() + throws Exception { + Pair topicAndBundle = getBundleIsNotOwnByChangeEventTopic("unload-timeout"); + String topic = topicAndBundle.getLeft().toString(); + var bundle = topicAndBundle.getRight().toString(); + var releasing = new ServiceUnitStateData(Releasing, pulsar2.getBrokerId(), pulsar1.getBrokerId(), 1); + overrideTableView(channel1, bundle, releasing); + var topicFuture = pulsar1.getBrokerService().getOrCreateTopic(topic); + + + try { + topicFuture.get(1, TimeUnit.SECONDS); + } catch (Exception e) { + log.info("getOrCreateTopic failed", e); + if (!(e.getCause() instanceof BrokerServiceException.ServiceUnitNotReadyException && e.getMessage() + .contains("Please redo the lookup"))) { + fail(); + } + } + + pulsar1.getBrokerService() + .unloadServiceUnit(topicAndBundle.getRight(), true, 5, + TimeUnit.SECONDS).get(2, TimeUnit.SECONDS); + } + private static abstract class MockBrokerFilter implements BrokerFilter { @Override @@ -1175,4 +1344,20 @@ private void setSecondaryLoadManager() throws IllegalAccessException { private CompletableFuture getBundleAsync(PulsarService pulsar, TopicName topic) { return pulsar.getNamespaceService().getBundleAsync(topic); } + + private Pair getBundleIsNotOwnByChangeEventTopic(String topicNamePrefix) + throws Exception { + TopicName changeEventsTopicName = + TopicName.get(defaultTestNamespace + "/" + SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME); + NamespaceBundle changeEventsBundle = getBundleAsync(pulsar1, changeEventsTopicName).get(); + int i = 0; + while (true) { + TopicName topicName = TopicName.get(defaultTestNamespace + "/" + topicNamePrefix + "-" + i); + NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get(); + if (!bundle.equals(changeEventsBundle)) { + return Pair.of(topicName, bundle); + } + i++; + } + } } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java index ace31b1d60bb2..ceb58e8d9647c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/channel/ServiceUnitStateChannelTest.java @@ -39,6 +39,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertThrows; +import static org.testng.Assert.expectThrows; import static org.testng.AssertJUnit.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; @@ -60,6 +61,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -84,11 +86,9 @@ import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.testcontext.PulsarTestContext; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.TypedMessageBuilder; import org.apache.pulsar.client.impl.TableViewImpl; import org.apache.pulsar.common.policies.data.TopicType; -import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.coordination.LeaderElectionState; @@ -106,8 +106,8 @@ public class ServiceUnitStateChannelTest extends MockedPulsarServiceBaseTest { 
private PulsarService pulsar2; private ServiceUnitStateChannel channel1; private ServiceUnitStateChannel channel2; - private String lookupServiceAddress1; - private String lookupServiceAddress2; + private String brokerId1; + private String brokerId2; private String bundle; private String bundle1; private String bundle2; @@ -155,10 +155,10 @@ protected void setup() throws Exception { channel2 = createChannel(pulsar2); channel2.start(); - lookupServiceAddress1 = (String) - FieldUtils.readDeclaredField(channel1, "lookupServiceAddress", true); - lookupServiceAddress2 = (String) - FieldUtils.readDeclaredField(channel2, "lookupServiceAddress", true); + brokerId1 = (String) + FieldUtils.readDeclaredField(channel1, "brokerId", true); + brokerId2 = (String) + FieldUtils.readDeclaredField(channel2, "brokerId", true); bundle = "public/default/0x00000000_0xffffffff"; bundle1 = "public/default/0x00000000_0xfffffff0"; @@ -218,7 +218,7 @@ public void channelOwnerTest() throws Exception { assertEquals(newChannelOwner1, newChannelOwner2); assertNotEquals(channelOwner1, newChannelOwner1); - if (newChannelOwner1.equals(Optional.of(lookupServiceAddress1))) { + if (newChannelOwner1.equals(Optional.of(brokerId1))) { assertTrue(channel1.isChannelOwnerAsync().get(2, TimeUnit.SECONDS)); assertFalse(channel2.isChannelOwnerAsync().get(2, TimeUnit.SECONDS)); } else { @@ -302,7 +302,7 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) } } try { - channel.publishAssignEventAsync(bundle, lookupServiceAddress1).get(2, TimeUnit.SECONDS); + channel.publishAssignEventAsync(bundle, brokerId1).get(2, TimeUnit.SECONDS); } catch (ExecutionException e) { if (e.getCause() instanceof IllegalStateException) { errorCnt++; @@ -310,7 +310,7 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) } try { channel.publishUnloadEventAsync( - new Unload(lookupServiceAddress1, bundle, Optional.of(lookupServiceAddress2))) + new Unload(brokerId1, bundle, Optional.of(brokerId2))) 
.get(2, TimeUnit.SECONDS); } catch (ExecutionException e) { if (e.getCause() instanceof IllegalStateException) { @@ -318,7 +318,7 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) } } try { - Split split = new Split(bundle, lookupServiceAddress1, Map.of( + Split split = new Split(bundle, brokerId1, Map.of( childBundle1Range, Optional.empty(), childBundle2Range, Optional.empty())); channel.publishSplitEventAsync(split) .get(2, TimeUnit.SECONDS); @@ -330,23 +330,6 @@ private int validateChannelStart(ServiceUnitStateChannelImpl channel) return errorCnt; } - @Test(priority = 1) - public void compactionScheduleTest() { - - Awaitility.await() - .pollInterval(200, TimeUnit.MILLISECONDS) - .atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> { // wait until true - try { - var threshold = admin.topicPolicies() - .getCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, false).longValue(); - assertEquals(5 * 1024 * 1024, threshold); - } catch (Exception e) { - ; - } - }); - } - @Test(priority = 2) public void assignmentTest() throws ExecutionException, InterruptedException, IllegalAccessException, TimeoutException { @@ -359,8 +342,8 @@ public void assignmentTest() assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - var assigned1 = channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - var assigned2 = channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + var assigned1 = channel1.publishAssignEventAsync(bundle, brokerId1); + var assigned2 = channel2.publishAssignEventAsync(bundle, brokerId2); assertNotNull(assigned1); assertNotNull(assigned2); waitUntilOwnerChanges(channel1, bundle, null); @@ -369,8 +352,8 @@ public void assignmentTest() String assignedAddr2 = assigned2.get(5, TimeUnit.SECONDS); assertEquals(assignedAddr1, assignedAddr2); - assertTrue(assignedAddr1.equals(lookupServiceAddress1) - || assignedAddr1.equals(lookupServiceAddress2), assignedAddr1); + assertTrue(assignedAddr1.equals(brokerId1) + || 
assignedAddr1.equals(brokerId2), assignedAddr1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); @@ -411,13 +394,13 @@ public void assignmentTestWhenOneAssignmentFails() assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - var owner3 = channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - var owner4 = channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + var owner3 = channel1.publishAssignEventAsync(bundle, brokerId1); + var owner4 = channel2.publishAssignEventAsync(bundle, brokerId2); assertTrue(owner3.isCompletedExceptionally()); assertNotNull(owner4); String ownerAddrOpt2 = owner4.get(5, TimeUnit.SECONDS); - assertEquals(ownerAddrOpt2, lookupServiceAddress2); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress2); + assertEquals(ownerAddrOpt2, brokerId2); + waitUntilNewOwner(channel1, bundle, brokerId2); assertEquals(0, getOwnerRequests1.size()); assertEquals(0, getOwnerRequests2.size()); @@ -435,25 +418,25 @@ public void transferTest() assertTrue(owner2.get().isEmpty()); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.of(lookupServiceAddress2)); + Unload unload = new Unload(brokerId1, bundle, Optional.of(brokerId2)); channel1.publishUnloadEventAsync(unload); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress2); - 
waitUntilNewOwner(channel2, bundle, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle, brokerId2); + waitUntilNewOwner(channel2, bundle, brokerId2); ownerAddr1 = channel1.getOwnerAsync(bundle).get(5, TimeUnit.SECONDS); ownerAddr2 = channel2.getOwnerAsync(bundle).get(5, TimeUnit.SECONDS); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress2)); + assertEquals(ownerAddr1, Optional.of(brokerId2)); validateHandlerCounters(channel1, 2, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0); validateHandlerCounters(channel2, 2, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -470,14 +453,14 @@ public void transferTestWhenDestBrokerFails() assertEquals(0, getOwnerRequests1.size()); assertEquals(0, getOwnerRequests2.size()); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); var producer = (Producer) FieldUtils.readDeclaredField(channel1, "producer", true); @@ -493,7 +476,7 @@ public void transferTestWhenDestBrokerFails() "inFlightStateWaitingTimeInMillis", 3 * 1000, true); FieldUtils.writeDeclaredField(channel2, "inFlightStateWaitingTimeInMillis", 3 * 1000, true); - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.of(lookupServiceAddress2)); + Unload unload = new Unload(brokerId1, bundle, Optional.of(brokerId2)); channel1.publishUnloadEventAsync(unload); // channel1 is broken. the ownership transfer won't be complete. 
waitUntilState(channel1, bundle); @@ -501,23 +484,21 @@ public void transferTestWhenDestBrokerFails() var owner1 = channel1.getOwnerAsync(bundle); var owner2 = channel2.getOwnerAsync(bundle); - assertFalse(owner1.isDone()); + assertTrue(owner1.isDone()); + assertEquals(brokerId2, owner1.get().get()); assertFalse(owner2.isDone()); - assertEquals(1, getOwnerRequests1.size()); + assertEquals(0, getOwnerRequests1.size()); assertEquals(1, getOwnerRequests2.size()); - // In 5 secs, the getOwnerAsync requests(lookup requests) should time out. - Awaitility.await().atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> assertTrue(owner1.isCompletedExceptionally())); - Awaitility.await().atMost(5, TimeUnit.SECONDS) + // In 10 secs, the getOwnerAsync requests(lookup requests) should time out. + Awaitility.await().atMost(10, TimeUnit.SECONDS) .untilAsserted(() -> assertTrue(owner2.isCompletedExceptionally())); - assertEquals(0, getOwnerRequests1.size()); assertEquals(0, getOwnerRequests2.size()); // recovered, check the monitor update state : Assigned -> Owned - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress1))) + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId1))) .when(loadManager).selectAsync(any(), any()); FieldUtils.writeDeclaredField(channel2, "producer", producer, true); FieldUtils.writeDeclaredField(channel1, @@ -526,18 +507,18 @@ public void transferTestWhenDestBrokerFails() "inFlightStateWaitingTimeInMillis", 1 , true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, 
brokerId1); ownerAddr1 = channel1.getOwnerAsync(bundle).get(); ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); var leader = channel1.isChannelOwnerAsync().get() ? channel1 : channel2; validateMonitorCounters(leader, @@ -558,13 +539,13 @@ public void transferTestWhenDestBrokerFails() @Test(priority = 6) public void splitAndRetryTest() throws Exception { - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); - assertEquals(ownerAddr2, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); + assertEquals(ownerAddr2, Optional.of(brokerId1)); assertTrue(ownerAddr1.isPresent()); NamespaceService namespaceService = spy(pulsar1.getNamespaceService()); @@ -605,14 +586,14 @@ public void splitAndRetryTest() throws Exception { - waitUntilNewOwner(channel1, childBundle11, lookupServiceAddress1); - waitUntilNewOwner(channel1, childBundle12, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle11, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle12, lookupServiceAddress1); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle11).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle12).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(childBundle11).get()); - assertEquals(Optional.of(lookupServiceAddress1), 
channel2.getOwnerAsync(childBundle12).get()); + waitUntilNewOwner(channel1, childBundle11, brokerId1); + waitUntilNewOwner(channel1, childBundle12, brokerId1); + waitUntilNewOwner(channel2, childBundle11, brokerId1); + waitUntilNewOwner(channel2, childBundle12, brokerId1); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle11).get()); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle12).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle11).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle12).get()); // try the monitor and check the monitor moves `Deleted` -> `Init` @@ -627,9 +608,9 @@ public void splitAndRetryTest() throws Exception { "semiTerminalStateWaitingTimeInMillis", 1, true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); waitUntilState(channel1, bundle, Init); waitUntilState(channel2, bundle, Init); @@ -723,7 +704,7 @@ public void handleBrokerDeletionEventTest() String leader = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); assertEquals(leader, leader2); - if (leader.equals(lookupServiceAddress2)) { + if (leader.equals(brokerId2)) { leaderChannel = channel2; followerChannel = channel1; var tmp = followerCleanupJobsTmp; @@ -739,22 +720,24 @@ public void handleBrokerDeletionEventTest() var owner1 = channel1.getOwnerAsync(bundle1); var owner2 = channel2.getOwnerAsync(bundle2); - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) .when(loadManager).selectAsync(any(), any()); 
assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - String broker = lookupServiceAddress1; + String broker = brokerId1; channel1.publishAssignEventAsync(bundle1, broker); channel2.publishAssignEventAsync(bundle2, broker); + waitUntilNewOwner(channel1, bundle1, broker); waitUntilNewOwner(channel2, bundle1, broker); waitUntilNewOwner(channel1, bundle2, broker); waitUntilNewOwner(channel2, bundle2, broker); - channel1.publishUnloadEventAsync(new Unload(broker, bundle1, Optional.of(lookupServiceAddress2))); - waitUntilNewOwner(channel1, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle1, lookupServiceAddress2); + // Verify to transfer the ownership to the other broker. + channel1.publishUnloadEventAsync(new Unload(broker, bundle1, Optional.of(brokerId2))); + waitUntilNewOwner(channel1, bundle1, brokerId2); + waitUntilNewOwner(channel2, bundle1, brokerId2); // test stable metadata state leaderChannel.handleMetadataSessionEvent(SessionReestablished); @@ -765,11 +748,13 @@ public void handleBrokerDeletionEventTest() System.currentTimeMillis() - (MAX_CLEAN_UP_DELAY_TIME_IN_SECS * 1000 + 1000), true); leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); followerChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); + leaderChannel.handleBrokerRegistrationEvent(brokerId2, NotificationType.Deleted); + followerChannel.handleBrokerRegistrationEvent(brokerId2, NotificationType.Deleted); - waitUntilNewOwner(channel1, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel1, bundle2, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle2, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle1, brokerId2); + waitUntilNewOwner(channel2, bundle1, brokerId2); + waitUntilNewOwner(channel1, bundle2, brokerId2); + waitUntilNewOwner(channel2, bundle2, brokerId2); verify(leaderCleanupJobs, 
times(1)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, times(0)).computeIfAbsent(eq(broker), any()); @@ -780,18 +765,18 @@ public void handleBrokerDeletionEventTest() }); validateMonitorCounters(leaderChannel, - 1, + 2, 0, - 1, + 3, 0, - 1, + 2, 0, 0); // test jittery metadata state - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle1, Optional.of(broker))); - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle2, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle1, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle2, Optional.of(broker))); waitUntilNewOwner(channel1, bundle1, broker); waitUntilNewOwner(channel2, bundle1, broker); waitUntilNewOwner(channel1, bundle2, broker); @@ -811,11 +796,11 @@ public void handleBrokerDeletionEventTest() }); validateMonitorCounters(leaderChannel, - 1, + 2, 0, - 1, + 3, 0, - 2, + 3, 0, 0); @@ -832,11 +817,11 @@ public void handleBrokerDeletionEventTest() }); validateMonitorCounters(leaderChannel, - 1, + 2, 0, - 1, + 3, 0, - 2, + 3, 0, 1); @@ -854,19 +839,19 @@ public void handleBrokerDeletionEventTest() }); validateMonitorCounters(leaderChannel, - 1, - 0, - 1, + 2, 0, 3, 0, + 4, + 0, 1); // finally cleanup - waitUntilNewOwner(channel1, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle1, lookupServiceAddress2); - waitUntilNewOwner(channel1, bundle2, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle2, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle1, brokerId2); + waitUntilNewOwner(channel2, bundle1, brokerId2); + waitUntilNewOwner(channel1, bundle2, brokerId2); + waitUntilNewOwner(channel2, bundle2, brokerId2); verify(leaderCleanupJobs, times(3)).computeIfAbsent(eq(broker), any()); verify(followerCleanupJobs, times(0)).computeIfAbsent(eq(broker), any()); @@ -876,17 +861,17 @@ public void handleBrokerDeletionEventTest() }); 
validateMonitorCounters(leaderChannel, - 2, - 0, 3, 0, - 3, + 5, + 0, + 4, 0, 1); // test unstable state - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle1, Optional.of(broker))); - channel1.publishUnloadEventAsync(new Unload(lookupServiceAddress2, bundle2, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle1, Optional.of(broker))); + channel1.publishUnloadEventAsync(new Unload(brokerId2, bundle2, Optional.of(broker))); waitUntilNewOwner(channel1, bundle1, broker); waitUntilNewOwner(channel2, bundle1, broker); waitUntilNewOwner(channel1, bundle2, broker); @@ -905,11 +890,11 @@ public void handleBrokerDeletionEventTest() }); validateMonitorCounters(leaderChannel, - 2, - 0, 3, 0, - 3, + 5, + 0, + 4, 1, 1); @@ -922,25 +907,24 @@ public void handleBrokerDeletionEventTest() } @Test(priority = 10) - public void conflictAndCompactionTest() throws ExecutionException, InterruptedException, TimeoutException, - IllegalAccessException, PulsarClientException, PulsarServerException { + public void conflictAndCompactionTest() throws Exception { String bundle = String.format("%s/%s", "public/default", "0x0000000a_0xffffffff"); var owner1 = channel1.getOwnerAsync(bundle); var owner2 = channel2.getOwnerAsync(bundle); assertTrue(owner1.get().isEmpty()); assertTrue(owner2.get().isEmpty()); - var assigned1 = channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + var assigned1 = channel1.publishAssignEventAsync(bundle, brokerId1); assertNotNull(assigned1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); String assignedAddr1 = assigned1.get(5, TimeUnit.SECONDS); - assertEquals(lookupServiceAddress1, assignedAddr1); + assertEquals(brokerId1, assignedAddr1); FieldUtils.writeDeclaredField(channel2, "inFlightStateWaitingTimeInMillis", 3 * 1000, 
true); - var assigned2 = channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + var assigned2 = channel2.publishAssignEventAsync(bundle, brokerId2); assertNotNull(assigned2); Exception ex = null; try { @@ -949,33 +933,48 @@ public void conflictAndCompactionTest() throws ExecutionException, InterruptedEx ex = e; } assertNull(ex); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(bundle).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(bundle).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(bundle).get()); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(bundle).get()); var compactor = spy (pulsar1.getStrategicCompactor()); Field strategicCompactorField = FieldUtils.getDeclaredField(PulsarService.class, "strategicCompactor", true); FieldUtils.writeField(strategicCompactorField, pulsar1, compactor, true); FieldUtils.writeField(strategicCompactorField, pulsar2, compactor, true); - Awaitility.await() - .pollInterval(200, TimeUnit.MILLISECONDS) - .atMost(140, TimeUnit.SECONDS) - .untilAsserted(() -> { - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); - verify(compactor, times(1)) - .compact(eq(ServiceUnitStateChannelImpl.TOPIC), any()); - }); + + var threshold = admin.topicPolicies() + .getCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC); + admin.topicPolicies() + .setCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, 0); + + try { + Awaitility.await() + .pollInterval(200, TimeUnit.MILLISECONDS) + .atMost(140, TimeUnit.SECONDS) + .untilAsserted(() -> { + channel1.publishAssignEventAsync(bundle, brokerId1); + verify(compactor, times(1)) + .compact(eq(ServiceUnitStateChannelImpl.TOPIC), any()); + }); + + + var channel3 = createChannel(pulsar); + channel3.start(); + Awaitility.await() + .pollInterval(200, TimeUnit.MILLISECONDS) + .atMost(5, TimeUnit.SECONDS) + .untilAsserted(() -> assertEquals( + channel3.getOwnerAsync(bundle).get(), 
Optional.of(brokerId1))); + channel3.close(); + } finally { + FieldUtils.writeDeclaredField(channel2, + "inFlightStateWaitingTimeInMillis", 30 * 1000, true); + if (threshold != null) { + admin.topicPolicies() + .setCompactionThreshold(ServiceUnitStateChannelImpl.TOPIC, threshold); + } + } - var channel3 = createChannel(pulsar); - channel3.start(); - Awaitility.await() - .pollInterval(200, TimeUnit.MILLISECONDS) - .atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> assertEquals( - channel3.getOwnerAsync(bundle).get(), Optional.of(lookupServiceAddress1))); - channel3.close(); - FieldUtils.writeDeclaredField(channel2, - "inFlightStateWaitingTimeInMillis", 30 * 1000, true); } @Test(priority = 11) @@ -1017,16 +1016,16 @@ public void ownerLookupCountTests() throws IllegalAccessException { public void unloadTest() throws ExecutionException, InterruptedException, IllegalAccessException { - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress1); + waitUntilNewOwner(channel1, bundle, brokerId1); + waitUntilNewOwner(channel2, bundle, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.empty()); + assertEquals(ownerAddr1, Optional.of(brokerId1)); + Unload unload = new Unload(brokerId1, bundle, Optional.empty()); channel1.publishUnloadEventAsync(unload); @@ -1038,17 +1037,17 @@ public void unloadTest() assertEquals(Optional.empty(), owner1.get()); assertEquals(Optional.empty(), owner2.get()); - channel2.publishAssignEventAsync(bundle, lookupServiceAddress2); + channel2.publishAssignEventAsync(bundle, brokerId2); - waitUntilNewOwner(channel1, bundle, 
lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle, brokerId2); + waitUntilNewOwner(channel2, bundle, brokerId2); ownerAddr1 = channel1.getOwnerAsync(bundle).get(); ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress2)); - Unload unload2 = new Unload(lookupServiceAddress2, bundle, Optional.empty()); + assertEquals(ownerAddr1, Optional.of(brokerId2)); + Unload unload2 = new Unload(brokerId2, bundle, Optional.empty()); channel2.publishUnloadEventAsync(unload2); @@ -1067,9 +1066,9 @@ public void unloadTest() "semiTerminalStateWaitingTimeInMillis", 1, true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); waitUntilState(channel1, bundle, Init); waitUntilState(channel2, bundle, Init); @@ -1099,7 +1098,7 @@ public void unloadTest() public void assignTestWhenDestBrokerProducerFails() throws ExecutionException, InterruptedException, IllegalAccessException { - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.empty()); + Unload unload = new Unload(brokerId1, bundle, Optional.empty()); channel1.publishUnloadEventAsync(unload); @@ -1123,22 +1122,20 @@ public void assignTestWhenDestBrokerProducerFails() "inFlightStateWaitingTimeInMillis", 3 * 1000, true); FieldUtils.writeDeclaredField(channel2, "inFlightStateWaitingTimeInMillis", 3 * 1000, true); - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) .when(loadManager).selectAsync(any(), any()); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress2); + 
channel1.publishAssignEventAsync(bundle, brokerId2); // channel1 is broken. the assign won't be complete. waitUntilState(channel1, bundle); waitUntilState(channel2, bundle); var owner1 = channel1.getOwnerAsync(bundle); var owner2 = channel2.getOwnerAsync(bundle); - assertFalse(owner1.isDone()); + assertTrue(owner1.isDone()); assertFalse(owner2.isDone()); - // In 5 secs, the getOwnerAsync requests(lookup requests) should time out. - Awaitility.await().atMost(5, TimeUnit.SECONDS) - .untilAsserted(() -> assertTrue(owner1.isCompletedExceptionally())); - Awaitility.await().atMost(5, TimeUnit.SECONDS) + // In 10 secs, the getOwnerAsync requests(lookup requests) should time out. + Awaitility.await().atMost(10, TimeUnit.SECONDS) .untilAsserted(() -> assertTrue(owner2.isCompletedExceptionally())); // recovered, check the monitor update state : Assigned -> Owned @@ -1149,18 +1146,18 @@ public void assignTestWhenDestBrokerProducerFails() "inFlightStateWaitingTimeInMillis", 1 , true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); - waitUntilNewOwner(channel1, bundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, bundle, lookupServiceAddress2); + waitUntilNewOwner(channel1, bundle, brokerId2); + waitUntilNewOwner(channel2, bundle, brokerId2); var ownerAddr1 = channel1.getOwnerAsync(bundle).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle).get(); assertEquals(ownerAddr1, ownerAddr2); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress2)); + assertEquals(ownerAddr1, Optional.of(brokerId2)); var leader = channel1.isChannelOwnerAsync().get() ? 
channel1 : channel2; validateMonitorCounters(leader, @@ -1184,20 +1181,20 @@ public void splitTestWhenProducerFails() throws ExecutionException, InterruptedException, IllegalAccessException { - Unload unload = new Unload(lookupServiceAddress1, bundle, Optional.empty()); + Unload unload = new Unload(brokerId1, bundle, Optional.empty()); channel1.publishUnloadEventAsync(unload); waitUntilState(channel1, bundle, Free); waitUntilState(channel2, bundle, Free); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); waitUntilState(channel1, bundle, Owned); waitUntilState(channel2, bundle, Owned); - assertEquals(lookupServiceAddress1, channel1.getOwnerAsync(bundle).get().get()); - assertEquals(lookupServiceAddress1, channel2.getOwnerAsync(bundle).get().get()); + assertEquals(brokerId1, channel1.getOwnerAsync(bundle).get().get()); + assertEquals(brokerId1, channel2.getOwnerAsync(bundle).get().get()); var producer = (Producer) FieldUtils.readDeclaredField(channel1, "producer", true); @@ -1216,7 +1213,7 @@ public void splitTestWhenProducerFails() // Assert child bundle ownerships in the channels. - Split split = new Split(bundle, lookupServiceAddress1, Map.of( + Split split = new Split(bundle, brokerId1, Map.of( childBundle1Range, Optional.empty(), childBundle2Range, Optional.empty())); channel2.publishSplitEventAsync(split); // channel1 is broken. the split won't be complete. 
@@ -1263,13 +1260,13 @@ public void testIsOwner() throws IllegalAccessException { assertFalse(owner1); assertFalse(owner2); - owner1 = channel1.isOwner(bundle, lookupServiceAddress2); - owner2 = channel2.isOwner(bundle, lookupServiceAddress1); + owner1 = channel1.isOwner(bundle, brokerId2); + owner2 = channel2.isOwner(bundle, brokerId1); assertFalse(owner1); assertFalse(owner2); - channel1.publishAssignEventAsync(bundle, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle, brokerId1); owner2 = channel2.isOwner(bundle); assertFalse(owner2); @@ -1282,49 +1279,111 @@ public void testIsOwner() throws IllegalAccessException { assertTrue(owner1); assertFalse(owner2); - owner1 = channel1.isOwner(bundle, lookupServiceAddress1); - owner2 = channel2.isOwner(bundle, lookupServiceAddress2); + owner1 = channel1.isOwner(bundle, brokerId1); + owner2 = channel2.isOwner(bundle, brokerId2); assertTrue(owner1); assertFalse(owner2); - owner1 = channel2.isOwner(bundle, lookupServiceAddress1); - owner2 = channel1.isOwner(bundle, lookupServiceAddress2); + owner1 = channel2.isOwner(bundle, brokerId1); + owner2 = channel1.isOwner(bundle, brokerId2); assertTrue(owner1); assertFalse(owner2); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, brokerId1, 1)); assertTrue(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, null, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, null, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, lookupServiceAddress1, 
1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, brokerId1, 1)); assertTrue(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Free, null, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Free, null, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); - overrideTableView(channel1, bundle, new ServiceUnitStateData(Deleted, null, lookupServiceAddress1, 1)); + overrideTableView(channel1, bundle, new ServiceUnitStateData(Deleted, null, brokerId1, 1)); assertFalse(channel1.isOwner(bundle)); overrideTableView(channel1, bundle, null); assertFalse(channel1.isOwner(bundle)); } + @Test(priority = 15) + public void testGetOwnerAsync() throws Exception { + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, brokerId1, 1)); + var owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId1, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Owned, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(!owner.isDone()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Assigning, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(!owner.isDone()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Releasing, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, owner.get().get()); + + overrideTableView(channel1, bundle, new 
ServiceUnitStateData(Releasing, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(Optional.empty(), owner.get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId1, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Splitting, null, brokerId2, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(brokerId2, owner.get().get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Free, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(Optional.empty(), owner.get()); + + overrideTableView(channel1, bundle, new ServiceUnitStateData(Deleted, null, brokerId1, 1)); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertTrue(owner.isCompletedExceptionally()); + + overrideTableView(channel1, bundle, null); + owner = channel1.getOwnerAsync(bundle); + assertTrue(owner.isDone()); + assertEquals(Optional.empty(), owner.get()); + } + @Test(priority = 16) public void splitAndRetryFailureTest() throws Exception { - channel1.publishAssignEventAsync(bundle3, lookupServiceAddress1); - waitUntilNewOwner(channel1, bundle3, lookupServiceAddress1); - waitUntilNewOwner(channel2, bundle3, lookupServiceAddress1); + channel1.publishAssignEventAsync(bundle3, brokerId1); + waitUntilNewOwner(channel1, bundle3, brokerId1); + waitUntilNewOwner(channel2, bundle3, brokerId1); var ownerAddr1 = channel1.getOwnerAsync(bundle3).get(); var ownerAddr2 = channel2.getOwnerAsync(bundle3).get(); - assertEquals(ownerAddr1, Optional.of(lookupServiceAddress1)); - assertEquals(ownerAddr2, Optional.of(lookupServiceAddress1)); + assertEquals(ownerAddr1, Optional.of(brokerId1)); + assertEquals(ownerAddr2, Optional.of(brokerId1)); 
assertTrue(ownerAddr1.isPresent()); NamespaceService namespaceService = spy(pulsar1.getNamespaceService()); @@ -1365,7 +1424,7 @@ public void splitAndRetryFailureTest() throws Exception { }); var leader = channel1.isChannelOwnerAsync().get() ? channel1 : channel2; ((ServiceUnitStateChannelImpl) leader) - .monitorOwnerships(List.of(lookupServiceAddress1, lookupServiceAddress2)); + .monitorOwnerships(List.of(brokerId1, brokerId2)); waitUntilState(leader, bundle3, Deleted); waitUntilState(channel1, bundle3, Deleted); waitUntilState(channel2, bundle3, Deleted); @@ -1376,14 +1435,14 @@ public void splitAndRetryFailureTest() throws Exception { validateEventCounters(channel1, 1, 0, 1, 0, 0, 0); validateEventCounters(channel2, 0, 0, 0, 0, 0, 0); - waitUntilNewOwner(channel1, childBundle31, lookupServiceAddress1); - waitUntilNewOwner(channel1, childBundle32, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle31, lookupServiceAddress1); - waitUntilNewOwner(channel2, childBundle32, lookupServiceAddress1); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle31).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel1.getOwnerAsync(childBundle32).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(childBundle31).get()); - assertEquals(Optional.of(lookupServiceAddress1), channel2.getOwnerAsync(childBundle32).get()); + waitUntilNewOwner(channel1, childBundle31, brokerId1); + waitUntilNewOwner(channel1, childBundle32, brokerId1); + waitUntilNewOwner(channel2, childBundle31, brokerId1); + waitUntilNewOwner(channel2, childBundle32, brokerId1); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle31).get()); + assertEquals(Optional.of(brokerId1), channel1.getOwnerAsync(childBundle32).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle31).get()); + assertEquals(Optional.of(brokerId1), channel2.getOwnerAsync(childBundle32).get()); // try the monitor 
and check the monitor moves `Deleted` -> `Init` @@ -1394,9 +1453,9 @@ public void splitAndRetryFailureTest() throws Exception { "semiTerminalStateWaitingTimeInMillis", 1, true); ((ServiceUnitStateChannelImpl) channel1).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); ((ServiceUnitStateChannelImpl) channel2).monitorOwnerships( - List.of(lookupServiceAddress1, lookupServiceAddress2)); + List.of(brokerId1, brokerId2)); waitUntilState(channel1, bundle3, Init); waitUntilState(channel2, bundle3, Init); @@ -1432,12 +1491,12 @@ public void testOverrideInactiveBrokerStateData() String leader = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); assertEquals(leader, leader2); - if (leader.equals(lookupServiceAddress2)) { + if (leader.equals(brokerId2)) { leaderChannel = channel2; followerChannel = channel1; } - String broker = lookupServiceAddress1; + String broker = brokerId1; // test override states String releasingBundle = "public/releasing/0xfffffff0_0xffffffff"; @@ -1462,7 +1521,7 @@ public void testOverrideInactiveBrokerStateData() new ServiceUnitStateData(Owned, broker, null, 1)); // test stable metadata state - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) .when(loadManager).selectAsync(any(), any()); leaderChannel.handleMetadataSessionEvent(SessionReestablished); followerChannel.handleMetadataSessionEvent(SessionReestablished); @@ -1473,11 +1532,11 @@ public void testOverrideInactiveBrokerStateData() leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); followerChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); - waitUntilNewOwner(channel2, releasingBundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, childBundle11, lookupServiceAddress2); - 
waitUntilNewOwner(channel2, childBundle12, lookupServiceAddress2); - waitUntilNewOwner(channel2, assigningBundle, lookupServiceAddress2); - waitUntilNewOwner(channel2, ownedBundle, lookupServiceAddress2); + waitUntilNewOwner(channel2, releasingBundle, brokerId2); + waitUntilNewOwner(channel2, childBundle11, brokerId2); + waitUntilNewOwner(channel2, childBundle12, brokerId2); + waitUntilNewOwner(channel2, assigningBundle, brokerId2); + waitUntilNewOwner(channel2, ownedBundle, brokerId2); assertEquals(Optional.empty(), channel2.getOwnerAsync(freeBundle).get()); assertTrue(channel2.getOwnerAsync(deletedBundle).isCompletedExceptionally()); assertTrue(channel2.getOwnerAsync(splittingBundle).isCompletedExceptionally()); @@ -1497,12 +1556,12 @@ public void testOverrideOrphanStateData() String leader = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); assertEquals(leader, leader2); - if (leader.equals(lookupServiceAddress2)) { + if (leader.equals(brokerId2)) { leaderChannel = channel2; followerChannel = channel1; } - String broker = lookupServiceAddress1; + String broker = brokerId1; // test override states String releasingBundle = "public/releasing/0xfffffff0_0xffffffff"; @@ -1527,19 +1586,19 @@ public void testOverrideOrphanStateData() new ServiceUnitStateData(Owned, broker, null, 1)); // test stable metadata state - doReturn(CompletableFuture.completedFuture(Optional.of(lookupServiceAddress2))) + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId2))) .when(loadManager).selectAsync(any(), any()); FieldUtils.writeDeclaredField(leaderChannel, "inFlightStateWaitingTimeInMillis", -1, true); FieldUtils.writeDeclaredField(followerChannel, "inFlightStateWaitingTimeInMillis", -1, true); ((ServiceUnitStateChannelImpl) leaderChannel) - .monitorOwnerships(List.of(lookupServiceAddress1, lookupServiceAddress2)); + .monitorOwnerships(List.of(brokerId1, brokerId2)); 
waitUntilNewOwner(channel2, releasingBundle, broker); waitUntilNewOwner(channel2, childBundle11, broker); waitUntilNewOwner(channel2, childBundle12, broker); - waitUntilNewOwner(channel2, assigningBundle, lookupServiceAddress2); + waitUntilNewOwner(channel2, assigningBundle, brokerId2); waitUntilNewOwner(channel2, ownedBundle, broker); assertEquals(Optional.empty(), channel2.getOwnerAsync(freeBundle).get()); assertTrue(channel2.getOwnerAsync(deletedBundle).isCompletedExceptionally()); @@ -1553,10 +1612,84 @@ public void testOverrideOrphanStateData() cleanTableViews(); } + @Test(priority = 19) + public void testActiveGetOwner() throws Exception { + + + // set the bundle owner is the broker + String broker = brokerId2; + String bundle = "public/owned/0xfffffff0_0xffffffff"; + overrideTableViews(bundle, + new ServiceUnitStateData(Owned, broker, null, 1)); + var owner = channel1.getOwnerAsync(bundle).get(5, TimeUnit.SECONDS).get(); + assertEquals(owner, broker); + + // simulate the owner is inactive + var spyRegistry = spy(new BrokerRegistryImpl(pulsar)); + doReturn(CompletableFuture.completedFuture(Optional.empty())) + .when(spyRegistry).lookupAsync(eq(broker)); + FieldUtils.writeDeclaredField(channel1, + "brokerRegistry", spyRegistry , true); + FieldUtils.writeDeclaredField(channel1, + "inFlightStateWaitingTimeInMillis", 1000, true); + + + // verify getOwnerAsync times out because the owner is inactive now. 
+ long start = System.currentTimeMillis(); + var ex = expectThrows(ExecutionException.class, () -> channel1.getOwnerAsync(bundle).get()); + assertTrue(ex.getCause() instanceof IllegalStateException); + assertTrue(System.currentTimeMillis() - start >= 1000); + + // simulate ownership cleanup(no selected owner) by the leader channel + doReturn(CompletableFuture.completedFuture(Optional.empty())) + .when(loadManager).selectAsync(any(), any()); + var leaderChannel = channel1; + String leader1 = channel1.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); + String leader2 = channel2.getChannelOwnerAsync().get(2, TimeUnit.SECONDS).get(); + assertEquals(leader1, leader2); + if (leader1.equals(brokerId2)) { + leaderChannel = channel2; + } + leaderChannel.handleMetadataSessionEvent(SessionReestablished); + FieldUtils.writeDeclaredField(leaderChannel, "lastMetadataSessionEventTimestamp", + System.currentTimeMillis() - (MAX_CLEAN_UP_DELAY_TIME_IN_SECS * 1000 + 1000), true); + leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); + + // verify the ownership cleanup, and channel's getOwnerAsync returns empty result without timeout + FieldUtils.writeDeclaredField(channel1, + "inFlightStateWaitingTimeInMillis", 20 * 1000, true); + start = System.currentTimeMillis(); + assertTrue(channel1.getOwnerAsync(bundle).get().isEmpty()); + assertTrue(System.currentTimeMillis() - start < 20_000); + + // simulate ownership cleanup(brokerId1 selected owner) by the leader channel + overrideTableViews(bundle, + new ServiceUnitStateData(Owned, broker, null, 1)); + doReturn(CompletableFuture.completedFuture(Optional.of(brokerId1))) + .when(loadManager).selectAsync(any(), any()); + leaderChannel.handleMetadataSessionEvent(SessionReestablished); + FieldUtils.writeDeclaredField(leaderChannel, "lastMetadataSessionEventTimestamp", + System.currentTimeMillis() - (MAX_CLEAN_UP_DELAY_TIME_IN_SECS * 1000 + 1000), true); + getCleanupJobs(leaderChannel).clear(); + 
leaderChannel.handleBrokerRegistrationEvent(broker, NotificationType.Deleted); + + // verify the ownership cleanup, and channel's getOwnerAsync returns brokerId1 without timeout + start = System.currentTimeMillis(); + assertEquals(brokerId1, channel1.getOwnerAsync(bundle).get().get()); + assertTrue(System.currentTimeMillis() - start < 20_000); + + // test clean-up + FieldUtils.writeDeclaredField(channel1, + "inFlightStateWaitingTimeInMillis", 30 * 1000, true); + FieldUtils.writeDeclaredField(channel1, + "brokerRegistry", registry , true); + cleanTableViews(); + + } - private static ConcurrentOpenHashMap>> getOwnerRequests( + private static ConcurrentHashMap>> getOwnerRequests( ServiceUnitStateChannel channel) throws IllegalAccessException { - return (ConcurrentOpenHashMap>>) + return (ConcurrentHashMap>>) FieldUtils.readDeclaredField(channel, "getOwnerRequests", true); } @@ -1573,9 +1706,9 @@ private static long getLastMetadataSessionEventTimestamp(ServiceUnitStateChannel FieldUtils.readField(channel, "lastMetadataSessionEventTimestamp", true); } - private static ConcurrentOpenHashMap> getCleanupJobs( + private static ConcurrentHashMap> getCleanupJobs( ServiceUnitStateChannel channel) throws IllegalAccessException { - return (ConcurrentOpenHashMap>) + return (ConcurrentHashMap>) FieldUtils.readField(channel, "cleanupJobs", true); } @@ -1664,7 +1797,7 @@ private void waitUntilStateWithMonitor(ServiceUnitStateChannel channel, String k .atMost(10, TimeUnit.SECONDS) .until(() -> { // wait until true ((ServiceUnitStateChannelImpl) channel) - .monitorOwnerships(List.of(lookupServiceAddress1, lookupServiceAddress2)); + .monitorOwnerships(List.of(brokerId1, brokerId2)); ServiceUnitStateData data = tv.get(key); ServiceUnitState actual = state(data); return actual == expected; @@ -1700,10 +1833,13 @@ private void overrideTableViews(String serviceUnit, ServiceUnitStateData val) th overrideTableView(channel2, serviceUnit, val); } - private static void 
overrideTableView(ServiceUnitStateChannel channel, String serviceUnit, ServiceUnitStateData val) + @Test(enabled = false) + public static void overrideTableView(ServiceUnitStateChannel channel, String serviceUnit, ServiceUnitStateData val) throws IllegalAccessException { var tv = (TableViewImpl) FieldUtils.readField(channel, "tableview", true); + var getOwnerRequests = (Map>) + FieldUtils.readField(channel, "getOwnerRequests", true); var cache = (ConcurrentMap) FieldUtils.readField(tv, "data", true); if(val == null){ @@ -1711,6 +1847,7 @@ private static void overrideTableView(ServiceUnitStateChannel channel, String se } else { cache.put(serviceUnit, val); } + getOwnerRequests.clear(); } private static void cleanOpsCounters(ServiceUnitStateChannel channel) @@ -1886,7 +2023,7 @@ ServiceUnitStateChannelImpl createChannel(PulsarService pulsar) var leaderElectionService = new LeaderElectionService( - pulsar.getCoordinationService(), pulsar.getSafeWebServiceAddress(), + pulsar.getCoordinationService(), pulsar.getBrokerId(), pulsar.getSafeWebServiceAddress(), state -> { if (state == LeaderElectionState.Leading) { channel.scheduleOwnershipMonitor(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java index 68bd7b29094cd..a120ef473e9a5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerFilterTestBase.java @@ -90,10 +90,25 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() throws 
LoadDataStoreException { + + } }; configuration.setPreferLaterVersions(true); doReturn(configuration).when(mockContext).brokerConfiguration(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java index f45e1405e1d21..87aaf4bac7fae 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/filter/BrokerIsolationPoliciesFilterTest.java @@ -84,20 +84,20 @@ public void testFilterWithNamespaceIsolationPoliciesForPrimaryAndSecondaryBroker // a. available-brokers: broker1, broker2, broker3 => result: broker1 Map result = filter.filterAsync(new HashMap<>(Map.of( - "broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker1")); + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080")); // b. available-brokers: broker2, broker3 => result: broker2 result = filter.filterAsync(new HashMap<>(Map.of( - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker2")); + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker2:8080")); // c. 
available-brokers: broker3 => result: NULL result = filter.filterAsync(new HashMap<>(Map.of( - "broker3", getLookupData())), namespaceName, getContext()).get(); + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); assertTrue(result.isEmpty()); // 2. Namespace: primary=broker1, secondary=broker2, shared=broker3, min_limit = 2 @@ -105,20 +105,20 @@ public void testFilterWithNamespaceIsolationPoliciesForPrimaryAndSecondaryBroker // a. available-brokers: broker1, broker2, broker3 => result: broker1, broker2 result = filter.filterAsync(new HashMap<>(Map.of( - "broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker1", "broker2")); + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080", "broker2:8080")); // b. available-brokers: broker2, broker3 => result: broker2 result = filter.filterAsync(new HashMap<>(Map.of( - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceName, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker2")); + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker2:8080")); // c. 
available-brokers: broker3 => result: NULL result = filter.filterAsync(new HashMap<>(Map.of( - "broker3", getLookupData())), namespaceName, getContext()).get(); + "broker3:8080", getLookupData())), namespaceName, getContext()).get(); assertTrue(result.isEmpty()); } @@ -142,31 +142,31 @@ public void testFilterWithPersistentOrNonPersistentDisabled() Map result = filter.filterAsync(new HashMap<>(Map.of( - "broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceBundle, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker1", "broker2", "broker3")); + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080", "broker2:8080", "broker3:8080")); result = filter.filterAsync(new HashMap<>(Map.of( - "broker1", getLookupData(true, false), - "broker2", getLookupData(true, false), - "broker3", getLookupData())), namespaceBundle, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker3")); + "broker1:8080", getLookupData(true, false), + "broker2:8080", getLookupData(true, false), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker3:8080")); doReturn(false).when(namespaceBundle).hasNonPersistentTopic(); result = filter.filterAsync(new HashMap<>(Map.of( - "broker1", getLookupData(), - "broker2", getLookupData(), - "broker3", getLookupData())), namespaceBundle, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker1", "broker2", "broker3")); + "broker1:8080", getLookupData(), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker1:8080", "broker2:8080", "broker3:8080")); result = filter.filterAsync(new HashMap<>(Map.of( - "broker1", getLookupData(false, true), - "broker2", getLookupData(), - 
"broker3", getLookupData())), namespaceBundle, getContext()).get(); - assertEquals(result.keySet(), Set.of("broker2", "broker3")); + "broker1:8080", getLookupData(false, true), + "broker2:8080", getLookupData(), + "broker3:8080", getLookupData())), namespaceBundle, getContext()).get(); + assertEquals(result.keySet(), Set.of("broker2:8080", "broker3:8080")); } private void setIsolationPolicies(SimpleResourceAllocationPolicies policies, diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java index 26d95a0158d52..0ff64616973d9 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/scheduler/TransferShedderTest.java @@ -100,6 +100,7 @@ import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; +import org.assertj.core.api.Assertions; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -168,18 +169,18 @@ public LoadManagerContext setupContext(){ var ctx = getContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 1000000, 3000000)); - topBundlesLoadDataStore.pushAsync("broker3", getTopBundlesLoad("my-tenant/my-namespaceC", 2000000, 4000000)); - topBundlesLoadDataStore.pushAsync("broker4", getTopBundlesLoad("my-tenant/my-namespaceD", 2000000, 6000000)); - topBundlesLoadDataStore.pushAsync("broker5", getTopBundlesLoad("my-tenant/my-namespaceE", 
2000000, 7000000)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 1000000, 3000000)); + topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 2000000, 4000000)); + topBundlesLoadDataStore.pushAsync("broker4:8080", getTopBundlesLoad("my-tenant/my-namespaceD", 2000000, 6000000)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 2000000, 7000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 2, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 4, "broker2")); - brokerLoadDataStore.pushAsync("broker3", getCpuLoad(ctx, 6, "broker3")); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 80, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 90, "broker5")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 2, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 4, "broker2:8080")); + brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 6, "broker3:8080")); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 80, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 90, "broker5:8080")); return ctx; } @@ -192,9 +193,9 @@ public LoadManagerContext setupContext(int clusterSize) { Random rand = new Random(); for (int i = 0; i < clusterSize; i++) { int brokerLoad = rand.nextInt(1000); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); int bundleLoad = rand.nextInt(brokerLoad + 1); - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" 
+ i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, bundleLoad, brokerLoad - bundleLoad)); } return ctx; @@ -209,14 +210,14 @@ public LoadManagerContext setupContextLoadSkewedOverload(int clusterSize) { int i = 0; for (; i < clusterSize-1; i++) { int brokerLoad = 1; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 300_000, 700_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); } int brokerLoad = 100; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 30_000_000, 70_000_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); return ctx; } @@ -230,21 +231,21 @@ public LoadManagerContext setupContextLoadSkewedUnderload(int clusterSize) { int i = 0; for (; i < clusterSize-2; i++) { int brokerLoad = 98; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 30_000_000, 70_000_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); } int brokerLoad = 99; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + 
":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 30_000_000, 70_000_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); i++; brokerLoad = 1; - topBundlesLoadDataStore.pushAsync("broker" + i, getTopBundlesLoad("my-tenant/my-namespace" + i, + topBundlesLoadDataStore.pushAsync("broker" + i + ":8080", getTopBundlesLoad("my-tenant/my-namespace" + i, 300_000, 700_000)); - brokerLoadDataStore.pushAsync("broker" + i, getCpuLoad(ctx, brokerLoad, "broker" + i)); + brokerLoadDataStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, brokerLoad, "broker" + i + ":8080")); return ctx; } @@ -383,10 +384,25 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() throws LoadDataStoreException { + + } }; var topBundleLoadDataStore = new LoadDataStore() { @@ -436,19 +452,34 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() throws LoadDataStoreException { + + } }; BrokerRegistry brokerRegistry = mock(BrokerRegistry.class); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", getMockBrokerLookupData(), - "broker2", getMockBrokerLookupData(), - "broker3", getMockBrokerLookupData(), - "broker4", getMockBrokerLookupData(), - "broker5", getMockBrokerLookupData() + "broker1:8080", getMockBrokerLookupData(), + "broker2:8080", getMockBrokerLookupData(), + "broker3:8080", getMockBrokerLookupData(), + "broker4:8080", 
getMockBrokerLookupData(), + "broker5:8080", getMockBrokerLookupData() ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); doReturn(conf).when(ctx).brokerConfiguration(); doReturn(brokerLoadDataStore).when(ctx).brokerLoadDataStore(); @@ -496,11 +527,11 @@ public void testEmptyTopBundlesLoadData() { var ctx = getContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 2, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 4, "broker2")); - brokerLoadDataStore.pushAsync("broker3", getCpuLoad(ctx, 6, "broker3")); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 80, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 90, "broker5")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 2, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 4, "broker2:8080")); + brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 6, "broker3:8080")); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 80, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 90, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -520,11 +551,11 @@ public void testOutDatedLoadData() throws IllegalAccessException { assertEquals(res.size(), 2); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker1").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker2").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker3").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker4").get(), "updatedAt", 0, true); - FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker5").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker1:8080").get(), "updatedAt", 0, true); + 
FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker2:8080").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker3:8080").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker4:8080").get(), "updatedAt", 0, true); + FieldUtils.writeDeclaredField(brokerLoadDataStore.get("broker5:8080").get(), "updatedAt", 0, true); res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -541,20 +572,20 @@ public void testRecentlyUnloadedBrokers() { Map recentlyUnloadedBrokers = new HashMap<>(); var oldTS = System.currentTimeMillis() - ctx.brokerConfiguration() .getLoadBalancerBrokerLoadDataTTLInSeconds() * 1001; - recentlyUnloadedBrokers.put("broker1", oldTS); + recentlyUnloadedBrokers.put("broker1:8080", oldTS); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), recentlyUnloadedBrokers); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); assertEquals(counter.getLoadStd(), setupLoadStd); var now = System.currentTimeMillis(); - recentlyUnloadedBrokers.put("broker1", now); + recentlyUnloadedBrokers.put("broker1:8080", now); res = transferShedder.findBundlesForUnloading(ctx, Map.of(), recentlyUnloadedBrokers); assertTrue(res.isEmpty()); @@ -574,9 +605,9 @@ public void testRecentlyUnloadedBundles() { recentlyUnloadedBundles.put(bundleD2, now); var res = transferShedder.findBundlesForUnloading(ctx, recentlyUnloadedBundles, Map.of()); var expected = new HashSet(); - expected.add(new 
UnloadDecision(new Unload("broker3", + expected.add(new UnloadDecision(new Unload("broker3:8080", "my-tenant/my-namespaceC/0x00000000_0x0FFFFFFF", - Optional.of("broker1")), + Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -609,13 +640,13 @@ public void testBundlesWithIsolationPolicies() { isolationPoliciesHelper, antiAffinityGroupPolicyHelper)); setIsolationPolicies(allocationPoliciesSpy, "my-tenant/my-namespaceE", - Set.of("broker5"), Set.of(), Set.of(), 1); + Set.of("broker5:8080"), Set.of(), Set.of(), 1); var ctx = setupContext(); ctx.brokerConfiguration().setLoadBalancerSheddingBundlesWithPoliciesEnabled(true); doReturn(ctx.brokerConfiguration()).when(pulsar).getConfiguration(); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -626,7 +657,7 @@ public void testBundlesWithIsolationPolicies() { res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); expected = new HashSet<>(); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.empty()), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.empty()), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -747,7 +778,7 @@ public void testBundlesWithAntiAffinityGroup() throws MetadataStoreException { }).when(antiAffinityGroupPolicyHelper).filterAsync(any(), any()); var res2 = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected2 = new HashSet<>(); - expected2.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + 
expected2.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res2, expected2); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -823,22 +854,22 @@ public void testTargetStd() { var ctx = getContext(); BrokerRegistry brokerRegistry = mock(BrokerRegistry.class); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class), - "broker3", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class), + "broker3:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); doReturn(brokerRegistry).when(ctx).brokerRegistry(); ctx.brokerConfiguration().setLoadBalancerDebugModeEnabled(true); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 10, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 20, "broker2")); - brokerLoadDataStore.pushAsync("broker3", getCpuLoad(ctx, 30, "broker3")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 10, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 20, "broker2:8080")); + brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 30, "broker3:8080")); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 30, 30)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 40, 40)); - topBundlesLoadDataStore.pushAsync("broker3", getTopBundlesLoad("my-tenant/my-namespaceC", 50, 50)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 30, 30)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 40, 40)); + 
topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 50, 50)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -854,11 +885,11 @@ public void testSingleTopBundlesLoadData() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 2)); - topBundlesLoadDataStore.pushAsync("broker3", getTopBundlesLoad("my-tenant/my-namespaceC", 6)); - topBundlesLoadDataStore.pushAsync("broker4", getTopBundlesLoad("my-tenant/my-namespaceD", 10)); - topBundlesLoadDataStore.pushAsync("broker5", getTopBundlesLoad("my-tenant/my-namespaceE", 70)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 2)); + topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 6)); + topBundlesLoadDataStore.pushAsync("broker4:8080", getTopBundlesLoad("my-tenant/my-namespaceD", 10)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 70)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); assertTrue(res.isEmpty()); @@ -873,14 +904,14 @@ public void testBundleThroughputLargerThanOffloadThreshold() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker4", getTopBundlesLoad("my-tenant/my-namespaceD", 1000000000, 1000000000)); - topBundlesLoadDataStore.pushAsync("broker5", getTopBundlesLoad("my-tenant/my-namespaceE", 1000000000, 1000000000)); + 
topBundlesLoadDataStore.pushAsync("broker4:8080", getTopBundlesLoad("my-tenant/my-namespaceD", 1000000000, 1000000000)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 1000000000, 1000000000)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker3", + expected.add(new UnloadDecision(new Unload("broker3:8080", "my-tenant/my-namespaceC/0x00000000_0x0FFFFFFF", - Optional.of("broker1")), + Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -894,12 +925,12 @@ public void testTargetStdAfterTransfer() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 55, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 65, "broker5")); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 55, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 65, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 0.26400000000000007); @@ -915,43 +946,43 @@ public void testUnloadBundlesGreaterThanTargetThroughput() throws IllegalAccessE var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", 
mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); - topBundlesLoadDataStore.pushAsync("broker2", + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 100000000, 180000000, 220000000, 250000000, 250000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 10, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 1000, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 10, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 1000, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x00000000_0x1FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x00000000_0x1FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new 
Unload("broker1", "my-tenant/my-namespaceA/0x00000000_0x1FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080", "my-tenant/my-namespaceA/0x00000000_0x1FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker1", "my-tenant/my-namespaceA/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080", "my-tenant/my-namespaceA/0x1FFFFFFF_0x2FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker1","my-tenant/my-namespaceA/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080","my-tenant/my-namespaceA/0x2FFFFFFF_0x3FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker1","my-tenant/my-namespaceA/0x3FFFFFFF_0x4FFFFFFF", Optional.of("broker2")), + new Unload("broker1:8080","my-tenant/my-namespaceA/0x3FFFFFFF_0x4FFFFFFF", Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(counter.getLoadAvg(), 5.05); assertEquals(counter.getLoadStd(), 4.95); @@ -971,20 +1002,20 @@ public void testSkipBundlesGreaterThanTargetThroughputAfterSplit() { var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1, 500000000)); - topBundlesLoadDataStore.pushAsync("broker2", + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 500000000, 500000000)); var brokerLoadDataStore = 
ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 50, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 100, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 50, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 100, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); @@ -1002,24 +1033,24 @@ public void testUnloadBundlesLessThanTargetThroughputAfterSplit() throws Illegal var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 490000000, 510000000)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1000000, 2000000, 3000000, 4000000, 5000000)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 490000000, 510000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 10, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 1000, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 10, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 1000, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); 
expected.add(new UnloadDecision( - new Unload("broker2", "my-tenant/my-namespaceB/0x00000000_0x0FFFFFFF", Optional.of("broker1")), + new Unload("broker2:8080", "my-tenant/my-namespaceB/0x00000000_0x0FFFFFFF", Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(counter.getLoadAvg(), 5.05); assertEquals(counter.getLoadStd(), 4.95); @@ -1040,30 +1071,30 @@ public void testUnloadBundlesGreaterThanTargetThroughputAfterSplit() throws Ille var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( - "broker1", mock(BrokerLookupData.class), - "broker2", mock(BrokerLookupData.class) + "broker1:8080", mock(BrokerLookupData.class), + "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker1", getTopBundlesLoad("my-tenant/my-namespaceA", 2400000, 2400000)); - topBundlesLoadDataStore.pushAsync("broker2", getTopBundlesLoad("my-tenant/my-namespaceB", 5000000, 5000000)); + topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 2400000, 2400000)); + topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 5000000, 5000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker1", getCpuLoad(ctx, 48, "broker1")); - brokerLoadDataStore.pushAsync("broker2", getCpuLoad(ctx, 100, "broker2")); + brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 48, "broker1:8080")); + brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 100, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); expected.add(new UnloadDecision( - new Unload("broker1", - res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker1")).findFirst().get() 
- .getUnload().serviceUnit(), Optional.of("broker2")), + new Unload("broker1:8080", + res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker1:8080")).findFirst().get() + .getUnload().serviceUnit(), Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( - new Unload("broker2", - res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker2")).findFirst().get() - .getUnload().serviceUnit(), Optional.of("broker1")), + new Unload("broker2:8080", + res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker2:8080")).findFirst().get() + .getUnload().serviceUnit(), Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(counter.getLoadAvg(), 0.74); assertEquals(counter.getLoadStd(), 0.26); @@ -1081,18 +1112,18 @@ public void testMinBrokerWithZeroTraffic() throws IllegalAccessException { var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - var load = getCpuLoad(ctx, 4, "broker2"); + var load = getCpuLoad(ctx, 4, "broker2:8080"); FieldUtils.writeDeclaredField(load,"msgThroughputEMA", 0, true); - brokerLoadDataStore.pushAsync("broker2", load); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 55, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 65, "broker5")); + brokerLoadDataStore.pushAsync("broker2:8080", load); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 55, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 65, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new 
Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Underloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 0.26400000000000007); @@ -1106,17 +1137,17 @@ public void testMinBrokerWithLowerLoadThanAvg() throws IllegalAccessException { var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - var load = getCpuLoad(ctx, 3 , "broker2"); - brokerLoadDataStore.pushAsync("broker2", load); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 55, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 65, "broker5")); + var load = getCpuLoad(ctx, 3 , "broker2:8080"); + brokerLoadDataStore.pushAsync("broker2:8080", load); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 55, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 65, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Underloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 0.262); @@ -1132,9 +1163,9 @@ public void testMaxNumberOfTransfersPerShedderCycle() { .setLoadBalancerMaxNumberOfBrokerSheddingPerCycle(10); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", 
bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -1158,9 +1189,9 @@ public void testLoadBalancerSheddingConditionHitCountThreshold() { } var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), setupLoadAvg); @@ -1173,13 +1204,13 @@ public void testRemainingTopBundles() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContext(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); - topBundlesLoadDataStore.pushAsync("broker5", getTopBundlesLoad("my-tenant/my-namespaceE", 2000000, 3000000)); + topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 2000000, 3000000)); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 
setupLoadAvg); @@ -1193,14 +1224,14 @@ public void testLoadMoreThan100() throws IllegalAccessException { var ctx = setupContext(); var brokerLoadDataStore = ctx.brokerLoadDataStore(); - brokerLoadDataStore.pushAsync("broker4", getCpuLoad(ctx, 200, "broker4")); - brokerLoadDataStore.pushAsync("broker5", getCpuLoad(ctx, 1000, "broker5")); + brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 200, "broker4:8080")); + brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 1000, "broker5:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); - expected.add(new UnloadDecision(new Unload("broker5", bundleE1, Optional.of("broker1")), + expected.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")), Success, Overloaded)); - expected.add(new UnloadDecision(new Unload("broker4", bundleD1, Optional.of("broker2")), + expected.add(new UnloadDecision(new Unload("broker4:8080", bundleD1, Optional.of("broker2:8080")), Success, Overloaded)); assertEquals(res, expected); assertEquals(counter.getLoadAvg(), 2.4240000000000004); @@ -1234,13 +1265,16 @@ public void testOverloadOutlier() { TransferShedder transferShedder = new TransferShedder(counter); var ctx = setupContextLoadSkewedOverload(100); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); - var expected = new HashSet(); - expected.add(new UnloadDecision( - new Unload("broker99", "my-tenant/my-namespace99/0x00000000_0x0FFFFFFF", - Optional.of("broker52")), Success, Overloaded)); - assertEquals(res, expected); - assertEquals(counter.getLoadAvg(), 0.019900000000000008); - assertEquals(counter.getLoadStd(), 0.09850375627355534); + Assertions.assertThat(res).isIn( + Set.of(new UnloadDecision( + new Unload("broker99:8080", "my-tenant/my-namespace99/0x00000000_0x0FFFFFFF", + Optional.of("broker52:8080")), Success, Overloaded)), + Set.of(new UnloadDecision( + new Unload("broker99:8080", 
"my-tenant/my-namespace99/0x00000000_0x0FFFFFFF", + Optional.of("broker83:8080")), Success, Overloaded)) + ); + assertEquals(counter.getLoadAvg(), 0.019900000000000008, 0.00001); + assertEquals(counter.getLoadStd(), 0.09850375627355534, 0.00001); } @Test @@ -1251,11 +1285,11 @@ public void testUnderloadOutlier() { var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet(); expected.add(new UnloadDecision( - new Unload("broker98", "my-tenant/my-namespace98/0x00000000_0x0FFFFFFF", - Optional.of("broker99")), Success, Underloaded)); + new Unload("broker98:8080", "my-tenant/my-namespace98/0x00000000_0x0FFFFFFF", + Optional.of("broker99:8080")), Success, Underloaded)); assertEquals(res, expected); - assertEquals(counter.getLoadAvg(), 0.9704000000000005); - assertEquals(counter.getLoadStd(), 0.09652895938523735); + assertEquals(counter.getLoadAvg(), 0.9704000000000005, 0.00001); + assertEquals(counter.getLoadStd(), 0.09652895938523735, 0.00001); } @Test @@ -1271,13 +1305,13 @@ public void testRandomLoadStats() { double[] loads = new double[numBrokers]; final Map availableBrokers = new HashMap<>(); for (int i = 0; i < loads.length; i++) { - availableBrokers.put("broker" + i, mock(BrokerLookupData.class)); + availableBrokers.put("broker" + i + ":8080", mock(BrokerLookupData.class)); } stats.update(loadStore, availableBrokers, Map.of(), conf); var brokerLoadDataStore = ctx.brokerLoadDataStore(); for (int i = 0; i < loads.length; i++) { - loads[i] = loadStore.get("broker" + i).get().getWeightedMaxEMA(); + loads[i] = loadStore.get("broker" + i + ":8080").get().getWeightedMaxEMA(); } int i = 0; int j = loads.length - 1; @@ -1312,8 +1346,8 @@ public void testHighVarianceLoadStats() { var conf = ctx.brokerConfiguration(); final Map availableBrokers = new HashMap<>(); for (int i = 0; i < loads.length; i++) { - availableBrokers.put("broker" + i, mock(BrokerLookupData.class)); - loadStore.pushAsync("broker" + i, getCpuLoad(ctx, 
loads[i], "broker" + i)); + availableBrokers.put("broker" + i + ":8080", mock(BrokerLookupData.class)); + loadStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, loads[i], "broker" + i + ":8080")); } stats.update(loadStore, availableBrokers, Map.of(), conf); @@ -1331,8 +1365,8 @@ public void testLowVarianceLoadStats() { var conf = ctx.brokerConfiguration(); final Map availableBrokers = new HashMap<>(); for (int i = 0; i < loads.length; i++) { - availableBrokers.put("broker" + i, mock(BrokerLookupData.class)); - loadStore.pushAsync("broker" + i, getCpuLoad(ctx, loads[i], "broker" + i)); + availableBrokers.put("broker" + i + ":8080", mock(BrokerLookupData.class)); + loadStore.pushAsync("broker" + i + ":8080", getCpuLoad(ctx, loads[i], "broker" + i + ":8080")); } stats.update(loadStore, availableBrokers, Map.of(), conf); assertEquals(stats.avg(), 3.9449999999999994); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java index 184c337a47c80..d25cba2bd1bdd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/store/LoadDataStoreTest.java @@ -20,7 +20,6 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertNotNull; import static org.testng.AssertJUnit.assertTrue; import com.google.common.collect.Sets; @@ -28,6 +27,7 @@ import lombok.Cleanup; import lombok.Data; import lombok.NoArgsConstructor; +import org.apache.commons.lang.reflect.FieldUtils; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; @@ -74,7 +74,8 @@ public void testPushGetAndRemove() throws 
Exception { @Cleanup LoadDataStore loadDataStore = - LoadDataStoreFactory.create(pulsar.getClient(), topic, MyClass.class); + LoadDataStoreFactory.create(pulsar, topic, MyClass.class); + loadDataStore.startProducer(); loadDataStore.startTableView(); MyClass myClass1 = new MyClass("1", 1); loadDataStore.pushAsync("key1", myClass1).get(); @@ -107,7 +108,8 @@ public void testForEach() throws Exception { @Cleanup LoadDataStore loadDataStore = - LoadDataStoreFactory.create(pulsar.getClient(), topic, Integer.class); + LoadDataStoreFactory.create(pulsar, topic, Integer.class); + loadDataStore.startProducer(); loadDataStore.startTableView(); Map map = new HashMap<>(); @@ -131,7 +133,8 @@ public void testForEach() throws Exception { public void testTableViewRestart() throws Exception { String topic = TopicDomain.persistent + "://" + NamespaceName.SYSTEM_NAMESPACE + "/" + UUID.randomUUID(); LoadDataStore loadDataStore = - LoadDataStoreFactory.create(pulsar.getClient(), topic, Integer.class); + LoadDataStoreFactory.create(pulsar, topic, Integer.class); + loadDataStore.startProducer(); loadDataStore.startTableView(); loadDataStore.pushAsync("1", 1).get(); @@ -140,15 +143,26 @@ public void testTableViewRestart() throws Exception { loadDataStore.closeTableView(); loadDataStore.pushAsync("1", 2).get(); - Exception ex = null; - try { - loadDataStore.get("1"); - } catch (IllegalStateException e) { - ex = e; - } - assertNotNull(ex); - loadDataStore.startTableView(); Awaitility.await().untilAsserted(() -> assertEquals(loadDataStore.get("1").get(), 2)); + + loadDataStore.pushAsync("1", 3).get(); + FieldUtils.writeField(loadDataStore, "tableViewLastUpdateTimestamp", 0 , true); + Awaitility.await().untilAsserted(() -> assertEquals(loadDataStore.get("1").get(), 3)); + } + + @Test + public void testProducerStop() throws Exception { + String topic = TopicDomain.persistent + "://" + NamespaceName.SYSTEM_NAMESPACE + "/" + UUID.randomUUID(); + LoadDataStore loadDataStore = + 
LoadDataStoreFactory.create(pulsar, topic, Integer.class); + loadDataStore.startProducer(); + loadDataStore.pushAsync("1", 1).get(); + loadDataStore.removeAsync("1").get(); + + loadDataStore.close(); + + loadDataStore.pushAsync("2", 2).get(); + loadDataStore.removeAsync("2").get(); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java index 0eea1d87513bf..b1e09bf2f3afb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/extensions/strategy/LeastResourceUsageWithWeightTest.java @@ -252,10 +252,25 @@ public void closeTableView() throws IOException { } + @Override + public void start() throws LoadDataStoreException { + + } + + @Override + public void init() throws IOException { + + } + @Override public void startTableView() throws LoadDataStoreException { } + + @Override + public void startProducer() throws LoadDataStoreException { + + } }; doReturn(conf).when(ctx).brokerConfiguration(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImplTest.java index 3fb62f486ab8d..f6a467af41bbe 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImplTest.java @@ -22,7 +22,6 @@ import static org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl.TIME_AVERAGE_BROKER_ZPATH; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; -import static 
org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -30,6 +29,7 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.BoundType; import com.google.common.collect.Range; @@ -45,7 +45,9 @@ import java.util.Optional; import java.util.Random; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; @@ -60,29 +62,38 @@ import org.apache.pulsar.broker.loadbalance.LoadBalancerTestingUtils; import org.apache.pulsar.broker.loadbalance.LoadData; import org.apache.pulsar.broker.loadbalance.LoadManager; +import org.apache.pulsar.broker.loadbalance.ResourceUnit; import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared.BrokerTopicLoadingPredicate; import org.apache.pulsar.client.admin.Namespaces; import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceBundleFactory; +import org.apache.pulsar.common.naming.NamespaceBundleSplitAlgorithm; import org.apache.pulsar.common.naming.NamespaceBundles; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.ServiceUnitId; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.BundlesData; 
import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.NamespaceIsolationDataImpl; +import org.apache.pulsar.common.policies.data.ResourceQuota; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.common.util.PortManager; +import org.apache.pulsar.metadata.api.MetadataCache; import org.apache.pulsar.metadata.api.Notification; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.extended.CreateOption; +import org.apache.pulsar.policies.data.loadbalancer.BrokerData; +import org.apache.pulsar.policies.data.loadbalancer.BundleData; import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; import org.apache.pulsar.policies.data.loadbalancer.ResourceUsage; import org.apache.pulsar.policies.data.loadbalancer.SystemResourceUsage; -import org.apache.pulsar.policies.data.loadbalancer.BrokerData; -import org.apache.pulsar.policies.data.loadbalancer.BundleData; import org.apache.pulsar.policies.data.loadbalancer.TimeAverageBrokerData; import org.apache.pulsar.policies.data.loadbalancer.TimeAverageMessageData; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; @@ -91,6 +102,7 @@ import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @Slf4j @@ -108,8 +120,9 @@ public class ModularLoadManagerImplTest { private PulsarService pulsar3; - private String primaryHost; - private String secondaryHost; + private String primaryBrokerId; + + private String secondaryBrokerId; private NamespaceBundleFactory nsFactory; @@ -156,6 +169,7 @@ void setup() throws Exception { config1.setLoadBalancerLoadSheddingStrategy("org.apache.pulsar.broker.loadbalance.impl.OverloadShedder"); 
config1.setClusterName("use"); config1.setWebServicePort(Optional.of(0)); + config1.setWebServicePortTls(Optional.of(0)); config1.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config1.setAdvertisedAddress("localhost"); @@ -163,11 +177,10 @@ void setup() throws Exception { config1.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config1.setBrokerServicePort(Optional.of(0)); config1.setBrokerServicePortTls(Optional.of(0)); - config1.setWebServicePortTls(Optional.of(0)); pulsar1 = new PulsarService(config1); pulsar1.start(); - primaryHost = String.format("%s:%d", "localhost", pulsar1.getListenPortHTTP().get()); + primaryBrokerId = pulsar1.getBrokerId(); url1 = new URL(pulsar1.getWebServiceAddress()); admin1 = PulsarAdmin.builder().serviceHttpUrl(url1.toString()).build(); @@ -177,13 +190,13 @@ void setup() throws Exception { config2.setLoadBalancerLoadSheddingStrategy("org.apache.pulsar.broker.loadbalance.impl.OverloadShedder"); config2.setClusterName("use"); config2.setWebServicePort(Optional.of(0)); + config2.setWebServicePortTls(Optional.of(0)); config2.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config2.setAdvertisedAddress("localhost"); config2.setBrokerShutdownTimeoutMs(0L); config2.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config2.setBrokerServicePort(Optional.of(0)); config2.setBrokerServicePortTls(Optional.of(0)); - config2.setWebServicePortTls(Optional.of(0)); pulsar2 = new PulsarService(config2); pulsar2.start(); @@ -192,16 +205,16 @@ void setup() throws Exception { config.setLoadBalancerLoadSheddingStrategy("org.apache.pulsar.broker.loadbalance.impl.OverloadShedder"); config.setClusterName("use"); config.setWebServicePort(Optional.of(0)); + config.setWebServicePortTls(Optional.of(0)); config.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); config.setAdvertisedAddress("localhost"); config.setBrokerShutdownTimeoutMs(0L); 
config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config.setBrokerServicePort(Optional.of(0)); config.setBrokerServicePortTls(Optional.of(0)); - config.setWebServicePortTls(Optional.of(0)); pulsar3 = new PulsarService(config); - secondaryHost = String.format("%s:%d", "localhost", pulsar2.getListenPortHTTP().get()); + secondaryBrokerId = pulsar2.getBrokerId(); url2 = new URL(pulsar2.getWebServiceAddress()); admin2 = PulsarAdmin.builder().serviceHttpUrl(url2.toString()).build(); @@ -221,7 +234,7 @@ void shutdown() throws Exception { pulsar2.close(); pulsar1.close(); - + if (pulsar3.isRunning()) { pulsar3.close(); } @@ -252,7 +265,7 @@ public void testCandidateConsistency() throws Exception { for (int i = 0; i < 2; ++i) { final ServiceUnitId serviceUnit = makeBundle(Integer.toString(i)); final String broker = primaryLoadManager.selectBrokerForAssignment(serviceUnit).get(); - if (broker.equals(primaryHost)) { + if (broker.equals(primaryBrokerId)) { foundFirst = true; } else { foundSecond = true; @@ -268,12 +281,12 @@ public void testCandidateConsistency() throws Exception { LoadData loadData = (LoadData) getField(primaryLoadManager, "loadData"); // Make sure the second broker is not in the internal map. - Awaitility.await().untilAsserted(() -> assertFalse(loadData.getBrokerData().containsKey(secondaryHost))); + Awaitility.await().untilAsserted(() -> assertFalse(loadData.getBrokerData().containsKey(secondaryBrokerId))); // Try 5 more selections, ensure they all go to the first broker. for (int i = 2; i < 7; ++i) { final ServiceUnitId serviceUnit = makeBundle(Integer.toString(i)); - assertEquals(primaryLoadManager.selectBrokerForAssignment(serviceUnit), primaryHost); + assertEquals(primaryLoadManager.selectBrokerForAssignment(serviceUnit), primaryBrokerId); } } @@ -295,7 +308,7 @@ public void testEvenBundleDistribution() throws Exception { // one bundle. 
pulsar1.getLocalMetadataStore().getMetadataCache(BundleData.class).create(firstBundleDataPath, bundleData).join(); for (final NamespaceBundle bundle : bundles) { - if (primaryLoadManager.selectBrokerForAssignment(bundle).equals(primaryHost)) { + if (primaryLoadManager.selectBrokerForAssignment(bundle).equals(primaryBrokerId)) { ++numAssignedToPrimary; } else { ++numAssignedToSecondary; @@ -309,52 +322,52 @@ public void testEvenBundleDistribution() throws Exception { } - + @Test public void testBrokerAffinity() throws Exception { // Start broker 3 pulsar3.start(); - + final String tenant = "test"; final String cluster = "test"; String namespace = tenant + "/" + cluster + "/" + "test"; String topic = "persistent://" + namespace + "/my-topic1"; - admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl("http://" + pulsar1.getAdvertisedAddress()).build()); + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); admin1.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); admin1.namespaces().createNamespace(namespace, 16); - + String topicLookup = admin1.lookups().lookupTopic(topic); String bundleRange = admin1.lookups().getBundleRange(topic); - + String brokerServiceUrl = pulsar1.getBrokerServiceUrl(); - String brokerUrl = pulsar1.getSafeWebServiceAddress(); + String brokerId = pulsar1.getBrokerId(); log.debug("initial broker service url - {}", topicLookup); Random rand=new Random(); - + if (topicLookup.equals(brokerServiceUrl)) { int x = rand.nextInt(2); if (x == 0) { - brokerUrl = pulsar2.getSafeWebServiceAddress(); + brokerId = pulsar2.getBrokerId(); brokerServiceUrl = pulsar2.getBrokerServiceUrl(); } else { - brokerUrl = pulsar3.getSafeWebServiceAddress(); + brokerId = pulsar3.getBrokerId(); brokerServiceUrl = pulsar3.getBrokerServiceUrl(); } } - brokerUrl = brokerUrl.replaceFirst("http[s]?://", ""); - log.debug("destination 
broker service url - {}, broker url - {}", brokerServiceUrl, brokerUrl); - String leaderServiceUrl = admin1.brokers().getLeaderBroker().getServiceUrl(); - log.debug("leader serviceUrl - {}, broker1 service url - {}", leaderServiceUrl, pulsar1.getSafeWebServiceAddress()); - //Make a call to broker which is not a leader - if (!leaderServiceUrl.equals(pulsar1.getSafeWebServiceAddress())) { - admin1.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerUrl); + log.debug("destination broker service url - {}, broker url - {}", brokerServiceUrl, brokerId); + String leaderBrokerId = admin1.brokers().getLeaderBroker().getBrokerId(); + log.debug("leader lookup address - {}, broker1 lookup address - {}", leaderBrokerId, + pulsar1.getBrokerId()); + // Make a call to broker which is not a leader + if (!leaderBrokerId.equals(pulsar1.getBrokerId())) { + admin1.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerId); } else { - admin2.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerUrl); + admin2.namespaces().unloadNamespaceBundle(namespace, bundleRange, brokerId); } - + sleep(2000); String topicLookupAfterUnload = admin1.lookups().lookupTopic(topic); log.debug("final broker service url - {}", topicLookupAfterUnload); @@ -426,9 +439,9 @@ public void testLoadShedding() throws Exception { pulsar1.getConfiguration().setLoadBalancerEnabled(true); final LoadData loadData = (LoadData) getField(primaryLoadManagerSpy, "loadData"); final Map brokerDataMap = loadData.getBrokerData(); - final BrokerData brokerDataSpy1 = spy(brokerDataMap.get(primaryHost)); + final BrokerData brokerDataSpy1 = spy(brokerDataMap.get(primaryBrokerId)); when(brokerDataSpy1.getLocalData()).thenReturn(localBrokerData); - brokerDataMap.put(primaryHost, brokerDataSpy1); + brokerDataMap.put(primaryBrokerId, brokerDataSpy1); // Need to update all the bundle data for the shredder to see the spy. 
primaryLoadManagerSpy.handleDataNotification(new Notification(NotificationType.Created, LoadManager.LOADBALANCE_BROKERS_ROOT + "/broker:8080")); @@ -446,7 +459,7 @@ public void testLoadShedding() throws Exception { verify(namespacesSpy1, Mockito.times(1)) .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); assertEquals(bundleReference.get(), mockBundleName(2)); - assertEquals(selectedBrokerRef.get().get(), secondaryHost); + assertEquals(selectedBrokerRef.get().get(), secondaryBrokerId); primaryLoadManagerSpy.doLoadShedding(); // Now less expensive bundle will be unloaded (normally other bundle would move off and nothing would be @@ -454,13 +467,13 @@ public void testLoadShedding() throws Exception { verify(namespacesSpy1, Mockito.times(2)) .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); assertEquals(bundleReference.get(), mockBundleName(1)); - assertEquals(selectedBrokerRef.get().get(), secondaryHost); + assertEquals(selectedBrokerRef.get().get(), secondaryBrokerId); primaryLoadManagerSpy.doLoadShedding(); // Now both are in grace period: neither should be unloaded. verify(namespacesSpy1, Mockito.times(2)) .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); - assertEquals(selectedBrokerRef.get().get(), secondaryHost); + assertEquals(selectedBrokerRef.get().get(), secondaryBrokerId); // Test bundle transfer to same broker @@ -469,13 +482,11 @@ public void testLoadShedding() throws Exception { verify(namespacesSpy1, Mockito.times(3)) .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); - doReturn(Optional.of(primaryHost)).when(primaryLoadManagerSpy).selectBroker(any()); loadData.getRecentlyUnloadedBundles().clear(); primaryLoadManagerSpy.doLoadShedding(); // The bundle shouldn't be unloaded because the broker is the same. 
- verify(namespacesSpy1, Mockito.times(3)) + verify(namespacesSpy1, Mockito.times(4)) .unloadNamespaceBundle(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); - } // Test that ModularLoadManagerImpl will determine that writing local data to ZooKeeper is necessary if certain @@ -592,10 +603,12 @@ public void testNamespaceIsolationPoliciesForPrimaryAndSecondaryBrokers() throws final String tenant = "my-property"; final String cluster = "use"; final String namespace = "my-ns"; - final String broker1Address = pulsar1.getAdvertisedAddress() + "0"; - final String broker2Address = pulsar2.getAdvertisedAddress() + "1"; - final String sharedBroker = "broker3"; - admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl("http://" + pulsar1.getAdvertisedAddress()).build()); + String broker1Host = pulsar1.getAdvertisedAddress() + "0"; + final String broker1Address = broker1Host + ":8080"; + String broker2Host = pulsar2.getAdvertisedAddress() + "1"; + final String broker2Address = broker2Host + ":8080"; + final String sharedBroker = "broker3:8080"; + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); admin1.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); admin1.namespaces().createNamespace(tenant + "/" + cluster + "/" + namespace); @@ -603,8 +616,8 @@ public void testNamespaceIsolationPoliciesForPrimaryAndSecondaryBrokers() throws // set a new policy String newPolicyJsonTemplate = "{\"namespaces\":[\"%s/%s/%s.*\"],\"primary\":[\"%s\"]," + "\"secondary\":[\"%s\"],\"auto_failover_policy\":{\"policy_type\":\"min_available\",\"parameters\":{\"min_limit\":%s,\"usage_threshold\":80}}}"; - String newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Address, - broker2Address, 1); + String newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Host, + 
broker2Host, 1); String newPolicyName = "my-ns-isolation-policies"; ObjectMapper jsonMapper = ObjectMapperFactory.create(); NamespaceIsolationDataImpl nsPolicyData = jsonMapper.readValue(newPolicyJson.getBytes(), @@ -616,12 +629,12 @@ public void testNamespaceIsolationPoliciesForPrimaryAndSecondaryBrokers() throws ServiceUnitId serviceUnit = LoadBalancerTestingUtils.makeBundles(nsFactory, tenant, cluster, namespace, 1)[0]; BrokerTopicLoadingPredicate brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override - public boolean isEnablePersistentTopics(String brokerUrl) { + public boolean isEnablePersistentTopics(String brokerId) { return true; } @Override - public boolean isEnableNonPersistentTopics(String brokerUrl) { + public boolean isEnableNonPersistentTopics(String brokerId) { return true; } }; @@ -653,8 +666,8 @@ public boolean isEnableNonPersistentTopics(String brokerUrl) { // (2) now we will have isolation policy : primary=broker1, secondary=broker2, minLimit=2 - newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Address, - broker2Address, 2); + newPolicyJson = String.format(newPolicyJsonTemplate, tenant, cluster, namespace, broker1Host, + broker2Host, 2); nsPolicyData = jsonMapper.readValue(newPolicyJson.getBytes(), NamespaceIsolationDataImpl.class); admin1.clusters().createNamespaceIsolationPolicy("use", newPolicyName, nsPolicyData); @@ -691,16 +704,18 @@ public void testLoadSheddingWithNamespaceIsolationPolicies() throws Exception { final String tenant = "my-tenant"; final String namespace = "my-tenant/use/my-ns"; final String bundle = "0x00000000_0xffffffff"; - final String brokerAddress = pulsar1.getAdvertisedAddress(); - final String broker1Address = pulsar1.getAdvertisedAddress() + 1; + final String brokerHost = pulsar1.getAdvertisedAddress(); + final String brokerAddress = brokerHost + ":8080"; + final String broker1Host = pulsar1.getAdvertisedAddress() + "1"; + final String broker1Address = 
broker1Host + ":8080"; - admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl("http://" + pulsar1.getAdvertisedAddress()).build()); + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); admin1.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); admin1.namespaces().createNamespace(namespace); @Cleanup - PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(pulsar1.getSafeWebServiceAddress()).build(); + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(pulsar1.getWebServiceAddress()).build(); Producer producer = pulsarClient.newProducer().topic("persistent://" + namespace + "/my-topic1") .create(); ModularLoadManagerImpl loadManager = (ModularLoadManagerImpl) ((ModularLoadManagerWrapper) pulsar1 @@ -709,12 +724,13 @@ public void testLoadSheddingWithNamespaceIsolationPolicies() throws Exception { loadManager.updateAll(); // test1: no isolation policy - assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryHost)); + assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryBrokerId)); // test2: as isolation policy, there are not another broker to load the bundle. 
String newPolicyJsonTemplate = "{\"namespaces\":[\"%s.*\"],\"primary\":[\"%s\"]," + "\"secondary\":[\"%s\"],\"auto_failover_policy\":{\"policy_type\":\"min_available\",\"parameters\":{\"min_limit\":%s,\"usage_threshold\":80}}}"; - String newPolicyJson = String.format(newPolicyJsonTemplate, namespace, broker1Address,broker1Address, 1); + + String newPolicyJson = String.format(newPolicyJsonTemplate, namespace, broker1Host, broker1Host, 1); String newPolicyName = "my-ns-isolation-policies"; ObjectMapper jsonMapper = ObjectMapperFactory.create(); NamespaceIsolationDataImpl nsPolicyData = jsonMapper.readValue(newPolicyJson.getBytes(), @@ -723,11 +739,11 @@ public void testLoadSheddingWithNamespaceIsolationPolicies() throws Exception { assertFalse(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, broker1Address)); // test3: as isolation policy, there are another can load the bundle. - String newPolicyJson1 = String.format(newPolicyJsonTemplate, namespace, brokerAddress,brokerAddress, 1); + String newPolicyJson1 = String.format(newPolicyJsonTemplate, namespace, brokerHost, brokerHost, 1); NamespaceIsolationDataImpl nsPolicyData1 = jsonMapper.readValue(newPolicyJson1.getBytes(), NamespaceIsolationDataImpl.class); admin1.clusters().updateNamespaceIsolationPolicy(cluster, newPolicyName, nsPolicyData1); - assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryHost)); + assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace, bundle, primaryBrokerId)); producer.close(); } @@ -744,7 +760,7 @@ public void testOwnBrokerZnodeByMultipleBroker() throws Exception { ServiceConfiguration config = new ServiceConfiguration(); config.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); config.setClusterName("use"); - config.setWebServicePort(Optional.of(0)); + config.setWebServicePort(Optional.of(PortManager.nextLockedFreePort())); config.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort()); 
config.setBrokerShutdownTimeoutMs(0L); config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -752,10 +768,12 @@ public void testOwnBrokerZnodeByMultipleBroker() throws Exception { PulsarService pulsar = new PulsarService(config); // create znode using different zk-session final String brokerZnode = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + pulsar.getAdvertisedAddress() + ":" - + config.getWebServicePort(); - pulsar1.getLocalMetadataStore().put(brokerZnode, new byte[0], Optional.empty(), EnumSet.of(CreateOption.Ephemeral)).join(); + + config.getWebServicePort().get(); + pulsar1.getLocalMetadataStore() + .put(brokerZnode, new byte[0], Optional.empty(), EnumSet.of(CreateOption.Ephemeral)).join(); try { pulsar.start(); + fail("should have failed"); } catch (PulsarServerException e) { //Ok. } @@ -782,4 +800,168 @@ public void testRemoveDeadBrokerTimeAverageData() throws Exception { Awaitility.await().untilAsserted(() -> assertTrue(pulsar1.getLeaderElectionService().isLeader())); assertEquals(data.size(), 1); } + + @DataProvider(name = "isV1") + public Object[][] isV1() { + return new Object[][] {{true}, {false}}; + } + + @Test(dataProvider = "isV1") + public void testBundleDataDefaultValue(boolean isV1) throws Exception { + final String cluster = "use"; + final String tenant = "my-tenant"; + final String namespace = "my-ns"; + NamespaceName ns = isV1 ? NamespaceName.get(tenant, cluster, namespace) : NamespaceName.get(tenant, namespace); + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); + admin1.tenants().createTenant(tenant, + new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); + admin1.namespaces().createNamespace(ns.toString(), 16); + + // set resourceQuota to the first bundle range. 
+ BundlesData bundlesData = admin1.namespaces().getBundles(ns.toString()); + NamespaceBundle namespaceBundle = nsFactory.getBundle(ns, + Range.range(Long.decode(bundlesData.getBoundaries().get(0)), BoundType.CLOSED, Long.decode(bundlesData.getBoundaries().get(1)), + BoundType.OPEN)); + ResourceQuota quota = new ResourceQuota(); + quota.setMsgRateIn(1024.1); + quota.setMsgRateOut(1024.2); + quota.setBandwidthIn(1024.3); + quota.setBandwidthOut(1024.4); + quota.setMemory(1024.0); + admin1.resourceQuotas().setNamespaceBundleResourceQuota(ns.toString(), namespaceBundle.getBundleRange(), quota); + + ModularLoadManagerWrapper loadManagerWrapper = (ModularLoadManagerWrapper) pulsar1.getLoadManager().get(); + ModularLoadManagerImpl lm = (ModularLoadManagerImpl) loadManagerWrapper.getLoadManager(); + + // get the bundleData of the first bundle range. + // The default value of the bundleData be the same as resourceQuota because the resourceQuota is present. + BundleData defaultBundleData = lm.getBundleDataOrDefault(namespaceBundle.toString()); + + TimeAverageMessageData shortTermData = defaultBundleData.getShortTermData(); + TimeAverageMessageData longTermData = defaultBundleData.getLongTermData(); + assertEquals(shortTermData.getMsgRateIn(), 1024.1); + assertEquals(shortTermData.getMsgRateOut(), 1024.2); + assertEquals(shortTermData.getMsgThroughputIn(), 1024.3); + assertEquals(shortTermData.getMsgThroughputOut(), 1024.4); + + assertEquals(longTermData.getMsgRateIn(), 1024.1); + assertEquals(longTermData.getMsgRateOut(), 1024.2); + assertEquals(longTermData.getMsgThroughputIn(), 1024.3); + assertEquals(longTermData.getMsgThroughputOut(), 1024.4); + } + + + @Test + public void testRemoveNonExistBundleData() + throws PulsarAdminException, InterruptedException, + PulsarClientException, PulsarServerException, NoSuchFieldException, IllegalAccessException { + final String cluster = "use"; + final String tenant = "my-tenant"; + final String namespace = 
"remove-non-exist-bundle-data-ns"; + final String topicName = tenant + "/" + namespace + "/" + "topic"; + int bundleNumbers = 8; + + admin1.clusters().createCluster(cluster, ClusterData.builder().serviceUrl(pulsar1.getWebServiceAddress()).build()); + admin1.tenants().createTenant(tenant, + new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet(cluster))); + admin1.namespaces().createNamespace(tenant + "/" + namespace, bundleNumbers); + + @Cleanup + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(pulsar1.getBrokerServiceUrl()).build(); + + // create a lot of topic to fully distributed among bundles. + for (int i = 0; i < 10; i++) { + String topicNameI = topicName + i; + admin1.topics().createPartitionedTopic(topicNameI, 20); + // trigger bundle assignment + + pulsarClient.newConsumer().topic(topicNameI) + .subscriptionName("my-subscriber-name2").subscribe(); + } + + ModularLoadManagerWrapper loadManagerWrapper = (ModularLoadManagerWrapper) pulsar1.getLoadManager().get(); + ModularLoadManagerImpl lm1 = (ModularLoadManagerImpl) loadManagerWrapper.getLoadManager(); + ModularLoadManagerWrapper loadManager2 = (ModularLoadManagerWrapper) pulsar2.getLoadManager().get(); + ModularLoadManagerImpl lm2 = (ModularLoadManagerImpl) loadManager2.getLoadManager(); + + Field executors = lm1.getClass().getDeclaredField("executors"); + executors.setAccessible(true); + ExecutorService executorService = (ExecutorService) executors.get(lm1); + + assertEquals(lm1.getAvailableBrokers().size(), 2); + + pulsar1.getBrokerService().updateRates(); + pulsar2.getBrokerService().updateRates(); + + lm1.writeBrokerDataOnZooKeeper(true); + lm2.writeBrokerDataOnZooKeeper(true); + + // wait for metadata store notification finish + CountDownLatch latch = new CountDownLatch(1); + executorService.submit(latch::countDown); + latch.await(); + + loadManagerWrapper.writeResourceQuotasToZooKeeper(); + + MetadataCache bundlesCache = 
pulsar1.getLocalMetadataStore().getMetadataCache(BundleData.class); + + // trigger bundle split + String topicToFindBundle = topicName + 0; + NamespaceBundle bundleWillBeSplit = pulsar1.getNamespaceService().getBundle(TopicName.get(topicToFindBundle)); + + final Optional leastLoaded = loadManagerWrapper.getLeastLoaded(bundleWillBeSplit); + assertFalse(leastLoaded.isEmpty()); + + String bundleDataPath = ModularLoadManagerImpl.BUNDLE_DATA_PATH + "/" + tenant + "/" + namespace; + CompletableFuture> children = bundlesCache.getChildren(bundleDataPath); + List bundles = children.join(); + assertTrue(bundles.contains(bundleWillBeSplit.getBundleRange())); + + // after updateAll no namespace bundle data is deleted from metadata store. + lm1.updateAll(); + + children = bundlesCache.getChildren(bundleDataPath); + bundles = children.join(); + assertFalse(bundles.isEmpty()); + assertEquals(bundleNumbers, bundles.size()); + + NamespaceName namespaceName = NamespaceName.get(tenant, namespace); + pulsar1.getAdminClient().namespaces().splitNamespaceBundle(tenant + "/" + namespace, + bundleWillBeSplit.getBundleRange(), + false, NamespaceBundleSplitAlgorithm.RANGE_EQUALLY_DIVIDE_NAME); + + NamespaceBundles allBundlesAfterSplit = + pulsar1.getNamespaceService().getNamespaceBundleFactory() + .getBundles(namespaceName); + + assertFalse(allBundlesAfterSplit.getBundles().contains(bundleWillBeSplit)); + + // the bundle data should be deleted + + pulsar1.getBrokerService().updateRates(); + pulsar2.getBrokerService().updateRates(); + + lm1.writeBrokerDataOnZooKeeper(true); + lm2.writeBrokerDataOnZooKeeper(true); + + latch = new CountDownLatch(1); + // wait for metadata store notification finish + CountDownLatch finalLatch = latch; + executorService.submit(finalLatch::countDown); + latch.await(); + + loadManagerWrapper.writeResourceQuotasToZooKeeper(); + + lm1.updateAll(); + + log.info("update all triggered."); + + // check bundle data should be deleted from metadata store. 
+ + CompletableFuture> childrenAfterSplit = bundlesCache.getChildren(bundleDataPath); + List bundlesAfterSplit = childrenAfterSplit.join(); + + assertFalse(bundlesAfterSplit.contains(bundleWillBeSplit.getBundleRange())); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java index 00182fffb8a31..4b4042cf31a72 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/UniformLoadShedderTest.java @@ -26,6 +26,7 @@ import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; @Test(groups = "broker") public class UniformLoadShedderTest { @@ -119,4 +120,81 @@ public void testBrokerWithMultipleBundles() { assertFalse(bundlesToUnload.isEmpty()); } + @Test + public void testOverloadBrokerSelect() { + conf.setMaxUnloadBundleNumPerShedding(1); + conf.setMaxUnloadPercentage(0.5); + int numBrokers = 5; + int numBundles = 5; + LoadData loadData = new LoadData(); + + LocalBrokerData[] localBrokerDatas = new LocalBrokerData[]{ + new LocalBrokerData(), + new LocalBrokerData(), + new LocalBrokerData(), + new LocalBrokerData(), + new LocalBrokerData()}; + + String[] brokerNames = new String[]{"broker0", "broker1", "broker2", "broker3", "broker4"}; + + double[] brokerMsgRates = new double[]{ + 50000, // broker0 + 60000, // broker1 + 70000, // broker2 + 10000, // broker3 + 20000};// broker4 + + double[] brokerMsgThroughputs = new double[]{ + 50 * 1024 * 1024, // broker0 + 60 * 1024 * 1024, // broker1 + 70 * 1024 * 1024, // broker2 + 80 * 1024 * 1024, // broker3 + 10 * 1024 * 1024};// broker4 + + + for (int brokerId = 0; brokerId < numBrokers; brokerId++) { + double msgRate = 
brokerMsgRates[brokerId] / numBundles; + double throughput = brokerMsgThroughputs[brokerId] / numBundles; + for (int i = 0; i < numBundles; ++i) { + String bundleName = "broker-" + brokerId + "-bundle-" + i; + localBrokerDatas[brokerId].getBundles().add(bundleName); + localBrokerDatas[brokerId].setMsgRateIn(brokerMsgRates[brokerId]); + localBrokerDatas[brokerId].setMsgThroughputIn(brokerMsgThroughputs[brokerId]); + BundleData bundle = new BundleData(); + + TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData(); + timeAverageMessageData.setMsgRateIn(msgRate); + timeAverageMessageData.setMsgThroughputIn(throughput); + bundle.setShortTermData(timeAverageMessageData); + loadData.getBundleData().put(bundleName, bundle); + } + loadData.getBrokerData().put(brokerNames[brokerId], new BrokerData(localBrokerDatas[brokerId])); + } + + // disable throughput based load shedding, enable rate based load shedding only + conf.setLoadBalancerMsgRateDifferenceShedderThreshold(50); + conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(0); + + Multimap bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf); + assertEquals(bundlesToUnload.size(), 1); + assertTrue(bundlesToUnload.containsKey("broker2")); + + + // disable rate based load shedding, enable throughput based load shedding only + conf.setLoadBalancerMsgRateDifferenceShedderThreshold(0); + conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(2); + + bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf); + assertEquals(bundlesToUnload.size(), 1); + assertTrue(bundlesToUnload.containsKey("broker3")); + + // enable both rate and throughput based load shedding, but rate based load shedding has higher priority + conf.setLoadBalancerMsgRateDifferenceShedderThreshold(50); + conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(2); + + bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf); + 
assertEquals(bundlesToUnload.size(), 1); + assertTrue(bundlesToUnload.containsKey("broker2")); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java index ac5d92c880227..f3fabc910346d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java @@ -349,14 +349,14 @@ public void testUnloadNamespaceBundleWithStuckTopic() throws Exception { @Test public void testLoadReportDeserialize() throws Exception { - final String candidateBroker1 = "http://localhost:8000"; - final String candidateBroker2 = "http://localhost:3000"; - LoadReport lr = new LoadReport(null, null, candidateBroker1, null); - LocalBrokerData ld = new LocalBrokerData(null, null, candidateBroker2, null); - URI uri1 = new URI(candidateBroker1); - URI uri2 = new URI(candidateBroker2); - String path1 = String.format("%s/%s:%s", LoadManager.LOADBALANCE_BROKERS_ROOT, uri1.getHost(), uri1.getPort()); - String path2 = String.format("%s/%s:%s", LoadManager.LOADBALANCE_BROKERS_ROOT, uri2.getHost(), uri2.getPort()); + final String candidateBroker1 = "localhost:8000"; + String broker1Url = "pulsar://localhost:6650"; + final String candidateBroker2 = "localhost:3000"; + String broker2Url = "pulsar://localhost:6660"; + LoadReport lr = new LoadReport("http://" + candidateBroker1, null, broker1Url, null); + LocalBrokerData ld = new LocalBrokerData("http://" + candidateBroker2, null, broker2Url, null); + String path1 = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker1); + String path2 = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker2); pulsar.getLocalMetadataStore().put(path1, ObjectMapperFactory.getMapper().writer().writeValueAsBytes(lr), @@ -375,23 +375,23 @@ public void 
testLoadReportDeserialize() throws Exception { .getAndSet(new ModularLoadManagerWrapper(new ModularLoadManagerImpl())); oldLoadManager.stop(); LookupResult result2 = pulsar.getNamespaceService().createLookupResult(candidateBroker2, false, null).get(); - Assert.assertEquals(result1.getLookupData().getBrokerUrl(), candidateBroker1); - Assert.assertEquals(result2.getLookupData().getBrokerUrl(), candidateBroker2); + Assert.assertEquals(result1.getLookupData().getBrokerUrl(), broker1Url); + Assert.assertEquals(result2.getLookupData().getBrokerUrl(), broker2Url); System.out.println(result2); } @Test public void testCreateLookupResult() throws Exception { - final String candidateBroker = "pulsar://localhost:6650"; + final String candidateBroker = "localhost:8080"; + final String brokerUrl = "pulsar://localhost:6650"; final String listenerUrl = "pulsar://localhost:7000"; final String listenerUrlTls = "pulsar://localhost:8000"; final String listener = "listenerName"; Map advertisedListeners = new HashMap<>(); advertisedListeners.put(listener, AdvertisedListener.builder().brokerServiceUrl(new URI(listenerUrl)).brokerServiceUrlTls(new URI(listenerUrlTls)).build()); - LocalBrokerData ld = new LocalBrokerData(null, null, candidateBroker, null, advertisedListeners); - URI uri = new URI(candidateBroker); - String path = String.format("%s/%s:%s", LoadManager.LOADBALANCE_BROKERS_ROOT, uri.getHost(), uri.getPort()); + LocalBrokerData ld = new LocalBrokerData("http://" + candidateBroker, null, brokerUrl, null, advertisedListeners); + String path = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, candidateBroker); pulsar.getLocalMetadataStore().put(path, ObjectMapperFactory.getMapper().writer().writeValueAsBytes(ld), @@ -401,7 +401,7 @@ public void testCreateLookupResult() throws Exception { LookupResult noListener = pulsar.getNamespaceService().createLookupResult(candidateBroker, false, null).get(); LookupResult withListener = 
pulsar.getNamespaceService().createLookupResult(candidateBroker, false, listener).get(); - Assert.assertEquals(noListener.getLookupData().getBrokerUrl(), candidateBroker); + Assert.assertEquals(noListener.getLookupData().getBrokerUrl(), brokerUrl); Assert.assertEquals(withListener.getLookupData().getBrokerUrl(), listenerUrl); Assert.assertEquals(withListener.getLookupData().getBrokerUrlTls(), listenerUrlTls); System.out.println(withListener); @@ -683,7 +683,7 @@ public void testSplitBundleWithHighestThroughput() throws Exception { @Test public void testHeartbeatNamespaceMatch() throws Exception { - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), conf); + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), conf); NamespaceBundle namespaceBundle = pulsar.getNamespaceService().getNamespaceBundleFactory().getFullBundle(namespaceName); assertTrue(NamespaceService.isSystemServiceNamespace( NamespaceBundle.getBundleNamespace(namespaceBundle.toString()))); @@ -704,7 +704,7 @@ public void testModularLoadManagerRemoveInactiveBundleFromLoadData() throws Exce Field loadManagerField = NamespaceService.class.getDeclaredField("loadManager"); loadManagerField.setAccessible(true); doReturn(true).when(loadManager).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); Optional res = Optional.of(resourceUnit); doReturn(res).when(loadManager).getLeastLoaded(any(ServiceUnitId.class)); loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager)); @@ -834,10 +834,7 @@ private void waitResourceDataUpdateToZK(LoadManager loadManager) throws Exceptio public CompletableFuture registryBrokerDataChangeNotice() { CompletableFuture completableFuture = new CompletableFuture<>(); - String lookupServiceAddress = 
pulsar.getAdvertisedAddress() + ":" - + (conf.getWebServicePort().isPresent() ? conf.getWebServicePort().get() - : conf.getWebServicePortTls().get()); - String brokerDataPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + lookupServiceAddress; + String brokerDataPath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + pulsar.getBrokerId(); pulsar.getLocalMetadataStore().registerListener(notice -> { if (brokerDataPath.equals(notice.getPath())){ if (!completableFuture.isDone()) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java index 0dbfe1760879a..1526611874a62 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceUnloadingTest.java @@ -22,8 +22,12 @@ import com.google.common.collect.Sets; +import lombok.Cleanup; import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -34,6 +38,9 @@ public class NamespaceUnloadingTest extends BrokerTestBase { @BeforeMethod @Override protected void setup() throws Exception { + conf.setTopicLevelPoliciesEnabled(true); + conf.setForceDeleteNamespaceAllowed(true); + conf.setTopicLoadTimeoutSeconds(Integer.MAX_VALUE); super.baseSetup(); } @@ -68,4 +75,26 @@ public void testUnloadPartiallyLoadedNamespace() throws Exception { producer.close(); } + @Test + public void testUnloadWithTopicCreation() throws PulsarAdminException, PulsarClientException { + final String namespaceName = "prop/ns_unloading"; + final String topicName = 
"persistent://prop/ns_unloading/with_topic_creation"; + final int partitions = 5; + admin.namespaces().createNamespace(namespaceName, 1); + admin.topics().createPartitionedTopic(topicName, partitions); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.INT32) + .topic(topicName) + .create(); + + for (int i = 0; i < 100; i++) { + admin.namespaces().unloadNamespaceBundle(namespaceName, "0x00000000_0xffffffff"); + } + + for (int i = 0; i < partitions; i++) { + producer.send(i); + } + admin.namespaces().deleteNamespace(namespaceName, true); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java index e53d5c25bd2b5..22fa2c32b5655 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipCacheForCurrentServerTest.java @@ -76,7 +76,7 @@ public void testCreateTopicWithNotTopicNsOwnedBroker() { int verifiedBrokerNum = 0; for (PulsarService pulsarService : this.getPulsarServiceList()) { BrokerService bs = pulsarService.getBrokerService(); - if (bs.isTopicNsOwnedByBroker(TopicName.get(topicName))) { + if (bs.isTopicNsOwnedByBrokerAsync(TopicName.get(topicName)).join()) { continue; } verifiedBrokerNum ++; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java index 8dd4f53db8240..46e8989ac3df4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java @@ -80,10 +80,8 @@ protected void startBroker() throws Exception { 
conf.setBrokerShutdownTimeoutMs(0L); conf.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); conf.setBrokerServicePort(Optional.of(0)); - conf.setBrokerServicePortTls(Optional.of(0)); conf.setAdvertisedAddress("localhost"); conf.setWebServicePort(Optional.of(0)); - conf.setWebServicePortTls(Optional.of(0)); serviceConfigurationList.add(conf); PulsarTestContext.Builder testContextBuilder = diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java index fed827b1517e6..9efd2c109f355 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupRateLimiterTest.java @@ -21,23 +21,30 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; - +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; +import java.util.function.Function; +import lombok.Cleanup; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.util.RateLimiter; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterClass; import 
org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - public class ResourceGroupRateLimiterTest extends BrokerTestBase { final String rgName = "testRG"; @@ -147,6 +154,76 @@ public void testResourceGroupPublishRateLimit() throws Exception { testRateLimit(); } + @Test + public void testWithConcurrentUpdate() throws Exception { + cleanup(); + setup(); + createResourceGroup(rgName, testAddRg); + admin.namespaces().setNamespaceResourceGroup(namespaceName, rgName); + + Awaitility.await().untilAsserted(() -> + assertNotNull(pulsar.getResourceGroupServiceManager() + .getNamespaceResourceGroup(NamespaceName.get(namespaceName)))); + + Awaitility.await().untilAsserted(() -> + assertNotNull(pulsar.getResourceGroupServiceManager() + .resourceGroupGet(rgName).getResourceGroupPublishLimiter())); + + ResourceGroupPublishLimiter resourceGroupPublishLimiter = Mockito.spy(pulsar.getResourceGroupServiceManager() + .resourceGroupGet(rgName).getResourceGroupPublishLimiter()); + + AtomicBoolean blocking = new AtomicBoolean(false); + BiFunction, Long, Boolean> blockFunc = (function, acquirePermit) -> { + blocking.set(true); + while (blocking.get()) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + return function.apply(acquirePermit); + }; + + Mockito.doAnswer(invocation -> { + RateLimiter publishRateLimiterOnMessage = + (RateLimiter) FieldUtils.readDeclaredField(resourceGroupPublishLimiter, + "publishRateLimiterOnMessage", true); + RateLimiter publishRateLimiterOnByte = + (RateLimiter) FieldUtils.readDeclaredField(resourceGroupPublishLimiter, + "publishRateLimiterOnByte", true); + int numbers = invocation.getArgument(0); + long bytes = invocation.getArgument(1); + return (publishRateLimiterOnMessage == null || publishRateLimiterOnMessage.tryAcquire(numbers)) + && 
(publishRateLimiterOnByte == null || blockFunc.apply(publishRateLimiterOnByte::tryAcquire, bytes)); + }).when(resourceGroupPublishLimiter).tryAcquire(Mockito.anyInt(), Mockito.anyLong()); + + ConcurrentHashMap resourceGroupsMap = + (ConcurrentHashMap) FieldUtils.readDeclaredField(pulsar.getResourceGroupServiceManager(), + "resourceGroupsMap", true); + FieldUtils.writeDeclaredField(resourceGroupsMap.get(rgName), "resourceGroupPublishLimiter", + resourceGroupPublishLimiter, true); + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(namespaceName + "/test-topic") + .create(); + + CompletableFuture sendFuture = producer.sendAsync(new byte[MESSAGE_SIZE]); + + Awaitility.await().untilAsserted(() -> Assert.assertTrue(blocking.get())); + + testAddRg.setPublishRateInBytes(Long.valueOf(MESSAGE_SIZE) + 1); + admin.resourcegroups().updateResourceGroup(rgName, testAddRg); + blocking.set(false); + + sendFuture.join(); + + // Now detach the namespace + admin.namespaces().removeNamespaceResourceGroup(namespaceName); + deleteResourceGroup(rgName); + } + + private void prepareData() { testAddRg.setPublishRateInBytes(Long.valueOf(MESSAGE_SIZE)); testAddRg.setPublishRateInMsgs(1); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractReplicatorTest.java index 294a9b341ec69..f8034c37971cc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractReplicatorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractReplicatorTest.java @@ -40,6 +40,7 @@ import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerBuilder; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.awaitility.Awaitility; 
@@ -62,8 +63,11 @@ public void testRetryStartProducerStoppedByTopicRemove() throws Exception { final PulsarService pulsar = mock(PulsarService.class); final BrokerService broker = mock(BrokerService.class); final Topic localTopic = mock(Topic.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); final PulsarClientImpl localClient = mock(PulsarClientImpl.class); + when(localClient.getCnxPool()).thenReturn(connectionPool); final PulsarClientImpl remoteClient = mock(PulsarClientImpl.class); + when(remoteClient.getCnxPool()).thenReturn(connectionPool); final ProducerBuilder producerBuilder = mock(ProducerBuilder.class); final ConcurrentOpenHashMap>> topics = new ConcurrentOpenHashMap<>(); when(broker.executor()).thenReturn(eventLoopGroup); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java index 554c663850fd9..19e40ebf9960f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AdvertisedAddressTest.java @@ -75,7 +75,7 @@ public void testAdvertisedAddress() throws Exception { Assert.assertEquals( pulsar.getAdvertisedAddress(), advertisedAddress ); Assert.assertEquals( pulsar.getBrokerServiceUrl(), String.format("pulsar://%s:%d", advertisedAddress, pulsar.getBrokerListenPort().get()) ); Assert.assertEquals( pulsar.getSafeWebServiceAddress(), String.format("http://%s:%d", advertisedAddress, pulsar.getListenPortHTTP().get()) ); - String brokerZkPath = String.format("/loadbalance/brokers/%s:%d", pulsar.getAdvertisedAddress(), pulsar.getListenPortHTTP().get()); + String brokerZkPath = String.format("/loadbalance/brokers/%s", pulsar.getBrokerId()); String bkBrokerData = new String(bkEnsemble.getZkClient().getData(brokerZkPath, false, new Stat()), StandardCharsets.UTF_8); JsonObject jsonBkBrokerData = new 
Gson().fromJson(bkBrokerData, JsonObject.class); Assert.assertEquals( jsonBkBrokerData.get("pulsarServiceUrl").getAsString(), pulsar.getBrokerServiceUrl() ); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java index 433f5e56d952d..8e902d5d1e700 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java @@ -18,8 +18,17 @@ */ package org.apache.pulsar.broker.service; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import com.carrotsearch.hppc.ObjectSet; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -27,19 +36,32 @@ import java.util.concurrent.TimeUnit; import lombok.Cleanup; import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.Entry; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; +import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.ConsumerBuilder; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; +import 
org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; +import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.collections.BitSetRecyclable; import org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; +@Slf4j @Test(groups = "broker") public class BatchMessageWithBatchIndexLevelTest extends BatchMessageTest { @@ -280,4 +302,370 @@ public void testAckMessageWithNotOwnerConsumerUnAckMessageCount() throws Excepti Awaitility.await().until(() -> getPulsar().getBrokerService().getTopic(topicName, false) .get().get().getSubscription(subName).getConsumers().get(0).getUnackedMessages() == 0); } + + @Test + public void testNegativeAckAndLongAckDelayWillNotLeadRepeatConsume() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://prop/ns-abc/tp_"); + final String subscriptionName = "s1"; + final int redeliveryDelaySeconds = 2; + + // Create producer and consumer. + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .enableBatching(true) + .batchingMaxMessages(1000) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .create(); + ConsumerImpl consumer = (ConsumerImpl) pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .negativeAckRedeliveryDelay(redeliveryDelaySeconds, TimeUnit.SECONDS) + .enableBatchIndexAcknowledgment(true) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .acknowledgmentGroupTime(1, TimeUnit.HOURS) + .subscribe(); + + // Send 10 messages in batch. 
+ ArrayList messagesSent = new ArrayList<>(); + List> sendTasks = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + String msg = Integer.valueOf(i).toString(); + sendTasks.add(producer.sendAsync(Integer.valueOf(i).toString())); + messagesSent.add(msg); + } + producer.flush(); + FutureUtil.waitForAll(sendTasks).join(); + + // Receive messages. + ArrayList messagesReceived = new ArrayList<>(); + // NegativeAck "batchMessageIdIndex1" once. + boolean index1HasBeenNegativeAcked = false; + while (true) { + Message message = consumer.receive(2, TimeUnit.SECONDS); + if (message == null) { + break; + } + if (index1HasBeenNegativeAcked) { + messagesReceived.add(message.getValue()); + consumer.acknowledge(message); + continue; + } + if (((MessageIdAdv) message.getMessageId()).getBatchIndex() == 1) { + consumer.negativeAcknowledge(message); + index1HasBeenNegativeAcked = true; + continue; + } + messagesReceived.add(message.getValue()); + consumer.acknowledge(message); + } + + // Receive negative acked messages. + // Wait the message negative acknowledgment finished. + int tripleRedeliveryDelaySeconds = redeliveryDelaySeconds * 3; + while (true) { + Message message = consumer.receive(tripleRedeliveryDelaySeconds, TimeUnit.SECONDS); + if (message == null) { + break; + } + messagesReceived.add(message.getValue()); + consumer.acknowledge(message); + } + + log.info("messagesSent: {}, messagesReceived: {}", messagesSent, messagesReceived); + Assert.assertEquals(messagesReceived.size(), messagesSent.size()); + + // cleanup. 
+ producer.close(); + consumer.close(); + admin.topics().delete(topicName); + } + + @Test + public void testMixIndexAndNonIndexUnAckMessageCount() throws Exception { + final String topicName = "persistent://prop/ns-abc/testMixIndexAndNonIndexUnAckMessageCount-"; + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topicName) + .enableBatching(true) + .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS) + .create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Shared) + .acknowledgmentGroupTime(100, TimeUnit.MILLISECONDS) + .enableBatchIndexAcknowledgment(true) + .isAckReceiptEnabled(true) + .subscribe(); + + // send two batch messages: [(1), (2,3)] + producer.send("1".getBytes()); + producer.sendAsync("2".getBytes()); + producer.send("3".getBytes()); + + Message message1 = consumer.receive(); + Message message2 = consumer.receive(); + Message message3 = consumer.receive(); + consumer.acknowledgeAsync(message1); + consumer.acknowledge(message2); // send group ack: non-index ack for 1, index ack for 2 + consumer.acknowledge(message3); // index ack for 3 + + assertEquals(admin.topics().getStats(topicName).getSubscriptions() + .get("sub").getUnackedMessages(), 0); + } + + @Test + public void testUnAckMessagesWhenConcurrentDeliveryAndAck() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://prop/ns-abc/tp"); + final String subName = "s1"; + final int receiverQueueSize = 500; + admin.topics().createNonPartitionedTopic(topicName); + admin.topics().createSubscription(topicName, subName, MessageId.earliest); + ConsumerBuilder consumerBuilder = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .receiverQueueSize(receiverQueueSize) + .subscriptionName(subName) + .enableBatchIndexAcknowledgment(true) + .subscriptionType(SubscriptionType.Shared) + .isAckReceiptEnabled(true); + + // Send 100 messages. 
+ Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .enableBatching(true) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .create(); + CompletableFuture lastSent = null; + for (int i = 0; i < 100; i++) { + lastSent = producer.sendAsync(i + ""); + } + producer.flush(); + lastSent.join(); + + // When consumer1 is closed, may some messages are in the client memory(it they are being acked now). + Consumer consumer1 = consumerBuilder.consumerName("c1").subscribe(); + Message[] messagesInClientMemory = new Message[2]; + for (int i = 0; i < 2; i++) { + Message msg = consumer1.receive(2, TimeUnit.SECONDS); + assertNotNull(msg); + messagesInClientMemory[i] = msg; + } + ConsumerImpl consumer2 = (ConsumerImpl) consumerBuilder.consumerName("c2").subscribe(); + Awaitility.await().until(() -> consumer2.isConnected()); + + // The consumer2 will receive messages after consumer1 closed. + // Insert a delay mechanism to make the flow like below: + // 1. Close consumer1, then the 100 messages will be redelivered. + // 2. Read redeliver messages. No messages were acked at this time. + // 3. The in-flight ack of two messages is finished. + // 4. Send the messages to consumer2, consumer2 will get all the 100 messages. + CompletableFuture receiveMessageSignal2 = new CompletableFuture<>(); + org.apache.pulsar.broker.service.Consumer serviceConsumer2 = + makeConsumerReceiveMessagesDelay(topicName, subName, "c2", receiveMessageSignal2); + // step 1: close consumer. + consumer1.close(); + // step 2: wait for read messages from replay queue. + Thread.sleep(2 * 1000); + // step 3: wait for the in-flight ack. 
+ BitSetRecyclable bitSetRecyclable = createBitSetRecyclable(100); + long ledgerId = 0, entryId = 0; + for (Message message : messagesInClientMemory) { + BatchMessageIdImpl msgId = (BatchMessageIdImpl) message.getMessageId(); + bitSetRecyclable.clear(msgId.getBatchIndex()); + ledgerId = msgId.getLedgerId(); + entryId = msgId.getEntryId(); + } + getCursor(topicName, subName).delete(PositionImpl.get(ledgerId, entryId, bitSetRecyclable.toLongArray())); + // step 4: send messages to consumer2. + receiveMessageSignal2.complete(null); + // Verify: Consumer2 will get all the 100 messages, and "unAckMessages" is 100. + List messages2 = new ArrayList<>(); + while (true) { + Message msg = consumer2.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messages2.add(msg); + } + assertEquals(messages2.size(), 100); + assertEquals(serviceConsumer2.getUnackedMessages(), 100); + // After the messages were pop out, the permits in the client memory went to 100. + Awaitility.await().untilAsserted(() -> { + assertEquals(serviceConsumer2.getAvailablePermits() + consumer2.getAvailablePermits(), + receiverQueueSize); + }); + + // cleanup. + producer.close(); + consumer2.close(); + admin.topics().delete(topicName, false); + } + + private BitSetRecyclable createBitSetRecyclable(int batchSize) { + BitSetRecyclable bitSetRecyclable = new BitSetRecyclable(batchSize); + for (int i = 0; i < batchSize; i++) { + bitSetRecyclable.set(i); + } + return bitSetRecyclable; + } + + private ManagedCursorImpl getCursor(String topic, String sub) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topic, false).join().get(); + PersistentDispatcherMultipleConsumers dispatcher = + (PersistentDispatcherMultipleConsumers) persistentTopic.getSubscription(sub).getDispatcher(); + return (ManagedCursorImpl) dispatcher.getCursor(); + } + + /*** + * After {@param signal} complete, the consumer({@param consumerName}) start to receive messages. 
+ */ + private org.apache.pulsar.broker.service.Consumer makeConsumerReceiveMessagesDelay(String topic, String sub, + String consumerName, + CompletableFuture signal) throws Exception { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topic, false).join().get(); + PersistentDispatcherMultipleConsumers dispatcher = + (PersistentDispatcherMultipleConsumers) persistentTopic.getSubscription(sub).getDispatcher(); + org.apache.pulsar.broker.service.Consumer serviceConsumer = null; + for (org.apache.pulsar.broker.service.Consumer c : dispatcher.getConsumers()){ + if (c.consumerName().equals(consumerName)) { + serviceConsumer = c; + break; + } + } + final org.apache.pulsar.broker.service.Consumer originalConsumer = serviceConsumer; + + // Insert a delay signal. + org.apache.pulsar.broker.service.Consumer spyServiceConsumer = spy(originalConsumer); + doAnswer(invocation -> { + List entries = (List) invocation.getArguments()[0]; + EntryBatchSizes batchSizes = (EntryBatchSizes) invocation.getArguments()[1]; + EntryBatchIndexesAcks batchIndexesAcks = (EntryBatchIndexesAcks) invocation.getArguments()[2]; + int totalMessages = (int) invocation.getArguments()[3]; + long totalBytes = (long) invocation.getArguments()[4]; + long totalChunkedMessages = (long) invocation.getArguments()[5]; + RedeliveryTracker redeliveryTracker = (RedeliveryTracker) invocation.getArguments()[6]; + return signal.thenApply(__ -> originalConsumer.sendMessages(entries, batchSizes, batchIndexesAcks, totalMessages, totalBytes, + totalChunkedMessages, redeliveryTracker)).join(); + }).when(spyServiceConsumer) + .sendMessages(anyList(), any(), any(), anyInt(), anyLong(), anyLong(), any()); + doAnswer(invocation -> { + List entries = (List) invocation.getArguments()[0]; + EntryBatchSizes batchSizes = (EntryBatchSizes) invocation.getArguments()[1]; + EntryBatchIndexesAcks batchIndexesAcks = (EntryBatchIndexesAcks) invocation.getArguments()[2]; + int totalMessages = (int) 
invocation.getArguments()[3]; + long totalBytes = (long) invocation.getArguments()[4]; + long totalChunkedMessages = (long) invocation.getArguments()[5]; + RedeliveryTracker redeliveryTracker = (RedeliveryTracker) invocation.getArguments()[6]; + long epoch = (long) invocation.getArguments()[7]; + return signal.thenApply(__ -> originalConsumer.sendMessages(entries, batchSizes, batchIndexesAcks, totalMessages, totalBytes, + totalChunkedMessages, redeliveryTracker, epoch)).join(); + }).when(spyServiceConsumer) + .sendMessages(anyList(), any(), any(), anyInt(), anyLong(), anyLong(), any(), anyLong()); + + // Replace the consumer. + Field fConsumerList = AbstractDispatcherMultipleConsumers.class.getDeclaredField("consumerList"); + Field fConsumerSet = AbstractDispatcherMultipleConsumers.class.getDeclaredField("consumerSet"); + fConsumerList.setAccessible(true); + fConsumerSet.setAccessible(true); + List consumerList = + (List) fConsumerList.get(dispatcher); + ObjectSet consumerSet = + (ObjectSet) fConsumerSet.get(dispatcher); + + consumerList.remove(originalConsumer); + consumerSet.removeAll(originalConsumer); + consumerList.add(spyServiceConsumer); + consumerSet.add(spyServiceConsumer); + return originalConsumer; + } + + /*** + * 1. Send a batch message contains 100 single messages. + * 2. Ack 2 messages. + * 3. Redeliver the batch message and ack them. + * 4. Verify: the permits is correct. + */ + @Test + public void testPermitsIfHalfAckBatchMessage() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://prop/ns-abc/tp"); + final String subName = "s1"; + final int receiverQueueSize = 1000; + final int ackedMessagesCountInTheFistStep = 2; + admin.topics().createNonPartitionedTopic(topicName); + admin.topics(). 
createSubscription(topicName, subName, MessageId.earliest); + ConsumerBuilder consumerBuilder = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .receiverQueueSize(receiverQueueSize) + .subscriptionName(subName) + .enableBatchIndexAcknowledgment(true) + .subscriptionType(SubscriptionType.Shared) + .isAckReceiptEnabled(true); + + // Send 100 messages. + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .enableBatching(true) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .create(); + CompletableFuture lastSent = null; + for (int i = 1; i <= 100; i++) { + lastSent = producer. sendAsync(i + ""); + } + producer.flush(); + lastSent.join(); + + // Ack 2 messages, and trigger a redelivery. + Consumer consumer1 = consumerBuilder.subscribe(); + for (int i = 0; i < ackedMessagesCountInTheFistStep; i++) { + Message msg = consumer1. receive(2, TimeUnit.SECONDS); + assertNotNull(msg); + consumer1.acknowledge(msg); + } + consumer1.close(); + + // Receive the left 98 messages, and ack them. + // Verify the permits is correct. + ConsumerImpl consumer2 = (ConsumerImpl) consumerBuilder.subscribe(); + Awaitility.await().until(() -> consumer2.isConnected()); + List messages = new ArrayList<>(); + int nextMessageValue = ackedMessagesCountInTheFistStep + 1; + while (true) { + Message msg = consumer2.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + assertEquals(msg.getValue(), nextMessageValue + ""); + messages.add(msg.getMessageId()); + nextMessageValue++; + } + assertEquals(messages.size(), 98); + consumer2.acknowledge(messages); + + org.apache.pulsar.broker.service.Consumer serviceConsumer2 = + getTheUniqueServiceConsumer(topicName, subName); + Awaitility.await().untilAsserted(() -> { + // After the messages were pop out, the permits in the client memory went to 98. 
+ int permitsInClientMemory = consumer2.getAvailablePermits(); + int permitsInBroker = serviceConsumer2.getAvailablePermits(); + assertEquals(permitsInClientMemory + permitsInBroker, receiverQueueSize); + }); + + // cleanup. + producer.close(); + consumer2.close(); + admin.topics().delete(topicName, false); + } + + private org.apache.pulsar.broker.service.Consumer getTheUniqueServiceConsumer(String topic, String sub) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService(). getTopic(topic, false).join().get(); + PersistentDispatcherMultipleConsumers dispatcher = + (PersistentDispatcherMultipleConsumers) persistentTopic.getSubscription(sub).getDispatcher(); + return dispatcher.getConsumers().iterator().next(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java index 9f19bda3647f3..40649a4164047 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBkEnsemblesTests.java @@ -21,8 +21,8 @@ import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.retryStrategically; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.fail; - import java.lang.reflect.Field; import java.util.Map.Entry; import java.util.NavigableMap; @@ -31,10 +31,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; - import com.google.common.collect.Sets; import lombok.Cleanup; - import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; @@ -47,6 +45,7 @@ import org.apache.bookkeeper.util.StringUtils; import 
org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; @@ -497,10 +496,31 @@ public void testDeleteTopicWithMissingData() throws Exception { // Expected } - // Deletion must succeed - admin.topics().delete(topic); + assertThrows(PulsarAdminException.ServerSideErrorException.class, () -> admin.topics().delete(topic)); + } + + @Test + public void testDeleteTopicWithoutTopicLoaded() throws Exception { + String namespace = BrokerTestUtil.newUniqueName("prop/usc"); + admin.namespaces().createNamespace(namespace); + + String topic = BrokerTestUtil.newUniqueName(namespace + "/my-topic"); + + @Cleanup + PulsarClient client = PulsarClient.builder() + .serviceUrl(pulsar.getBrokerServiceUrl()) + .statsInterval(0, TimeUnit.SECONDS) + .build(); - // Topic will not be there after + @Cleanup + Producer producer = client.newProducer(Schema.STRING) + .topic(topic) + .create(); + + producer.close(); + admin.topics().unload(topic); + + admin.topics().delete(topic); assertEquals(pulsar.getBrokerService().getTopicIfExists(topic).join(), Optional.empty()); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java index 951892f4ebfbc..5252407892eea 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerBookieIsolationTest.java @@ -304,6 +304,7 @@ public void testSetRackInfoAndAffinityGroupDuringProduce() throws Exception { bookies[3].getBookieId()); ServiceConfiguration config = new ServiceConfiguration(); + config.setTopicLevelPoliciesEnabled(false); 
config.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); config.setClusterName(cluster); config.setWebServicePort(Optional.of(0)); @@ -612,9 +613,9 @@ public void testBookieIsolationWithSecondaryGroup() throws Exception { config.setBrokerShutdownTimeoutMs(0L); config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); config.setBrokerServicePort(Optional.of(0)); + config.setTopicLevelPoliciesEnabled(false); config.setAdvertisedAddress("localhost"); config.setBookkeeperClientIsolationGroups(brokerBookkeeperClientIsolationGroups); - config.setManagedLedgerDefaultEnsembleSize(2); config.setManagedLedgerDefaultWriteQuorum(2); config.setManagedLedgerDefaultAckQuorum(2); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java index a28b60bbae354..0a6cffc7685d4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java @@ -25,18 +25,27 @@ import java.util.List; +import java.util.Optional; +import java.util.Set; import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; +import org.apache.pulsar.broker.loadbalance.extensions.ExtensibleLoadManagerImpl; +import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; import org.apache.pulsar.client.admin.ListNamespaceTopicsOptions; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.SystemTopicNames; import 
org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterClass; @@ -526,4 +535,58 @@ public void testDynamicConfigurationTopicAutoCreationPartitionedWhenDefaultMoreT } } + @Test + public void testExtensibleLoadManagerImplInternalTopicAutoCreations() + throws PulsarAdminException, PulsarClientException { + pulsar.getConfiguration().setAllowAutoTopicCreation(true); + pulsar.getConfiguration().setAllowAutoTopicCreationType(TopicType.PARTITIONED); + pulsar.getConfiguration().setDefaultNumPartitions(3); + pulsar.getConfiguration().setMaxNumPartitionsPerPartitionedTopic(5); + final String namespaceName = NamespaceName.SYSTEM_NAMESPACE.toString(); + TenantInfoImpl tenantInfo = new TenantInfoImpl(); + tenantInfo.setAllowedClusters(Set.of(configClusterName)); + admin.tenants().createTenant("pulsar", tenantInfo); + admin.namespaces().createNamespace(namespaceName); + admin.topics().createNonPartitionedTopic(ServiceUnitStateChannelImpl.TOPIC); + admin.topics().createNonPartitionedTopic(ExtensibleLoadManagerImpl.BROKER_LOAD_DATA_STORE_TOPIC); + admin.topics().createNonPartitionedTopic(ExtensibleLoadManagerImpl.TOP_BUNDLES_LOAD_DATA_STORE_TOPIC); + + // clear the topics to test the auto creation of non-persistent topics. 
+ ConcurrentOpenHashMap>> topics = + pulsar.getBrokerService().getTopics(); + ConcurrentOpenHashMap>> oldTopics = new ConcurrentOpenHashMap<>(); + topics.forEach((key, val) -> oldTopics.put(key, val)); + topics.clear(); + + // The created persistent topic correctly can be found by + // pulsar.getPulsarResources().getTopicResources().persistentTopicExists(topic); + Producer producer = pulsarClient.newProducer().topic(ServiceUnitStateChannelImpl.TOPIC).create(); + + // The created non-persistent topics cannot be found, as we did topics.clear() + try { + pulsarClient.newProducer().topic(ExtensibleLoadManagerImpl.BROKER_LOAD_DATA_STORE_TOPIC).create(); + Assert.fail("Create should have failed."); + } catch (PulsarClientException.TopicDoesNotExistException e) { + // expected + } + try { + pulsarClient.newProducer().topic(ExtensibleLoadManagerImpl.TOP_BUNDLES_LOAD_DATA_STORE_TOPIC).create(); + Assert.fail("Create should have failed."); + } catch (PulsarClientException.TopicDoesNotExistException e) { + // expected + } + + oldTopics.forEach((key, val) -> topics.put(key, val)); + + Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> { + List partitionedTopicList = admin.topics().getPartitionedTopicList(namespaceName); + assertEquals(partitionedTopicList.size(), 0); + }); + + producer.close(); + admin.namespaces().deleteNamespace(namespaceName); + admin.tenants().deleteTenant("pulsar"); + + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java index 3d67a4bc840a5..2b3d73a957925 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java @@ -67,6 +67,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import 
org.apache.bookkeeper.mledger.ManagedLedgerException; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerFactoryImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.http.HttpResponse; @@ -77,6 +78,7 @@ import org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BrokerServiceException.PersistenceException; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.stats.prometheus.PrometheusRawMetricsProvider; import org.apache.pulsar.client.admin.BrokerStats; @@ -91,6 +93,7 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.SubscriptionMode; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.ClientCnx; import org.apache.pulsar.client.impl.ConnectionPool; @@ -109,6 +112,7 @@ import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.util.netty.EventLoopUtil; +import org.apache.pulsar.compaction.Compactor; import org.awaitility.Awaitility; import org.mockito.Mockito; import org.testng.Assert; @@ -1143,7 +1147,7 @@ public void testTopicLoadingOnDisableNamespaceBundle() throws Exception { // try to create topic which should fail as bundle is disable CompletableFuture> futureResult = pulsar.getBrokerService() - .loadOrCreatePersistentTopic(topicName, true, null); + .loadOrCreatePersistentTopic(topicName, true, null, null); try { futureResult.get(); @@ -1187,7 +1191,7 @@ public void testConcurrentLoadTopicExceedLimitShouldNotBeAutoCreated() throws Ex for 
(int i = 0; i < 10; i++) { // try to create topic which should fail as bundle is disable CompletableFuture> futureResult = pulsar.getBrokerService() - .loadOrCreatePersistentTopic(topicName + "_" + i, false, null); + .loadOrCreatePersistentTopic(topicName + "_" + i, false, null, null); loadFutures.add(futureResult); } @@ -1218,6 +1222,69 @@ public void testConcurrentLoadTopicExceedLimitShouldNotBeAutoCreated() throws Ex } } + @Test + public void testCheckInactiveSubscriptionsShouldNotDeleteCompactionCursor() throws Exception { + String namespace = "prop/test"; + + // set up broker set compaction threshold. + cleanup(); + conf.setBrokerServiceCompactionThresholdInBytes(8); + setup(); + + try { + admin.namespaces().createNamespace(namespace); + } catch (PulsarAdminException.ConflictException e) { + // Ok.. (if test fails intermittently and namespace is already created) + } + + // set enable subscription expiration. + admin.namespaces().setSubscriptionExpirationTime(namespace, 1); + + String compactionInactiveTestTopic = "persistent://prop/test/testCompactionCursorShouldNotDelete"; + + admin.topics().createNonPartitionedTopic(compactionInactiveTestTopic); + + CompletableFuture> topicCf = + pulsar.getBrokerService().getTopic(compactionInactiveTestTopic, true); + + Optional topicOptional = topicCf.get(); + assertTrue(topicOptional.isPresent()); + + PersistentTopic topic = (PersistentTopic) topicOptional.get(); + + PersistentSubscription sub = (PersistentSubscription) topic.getSubscription(Compactor.COMPACTION_SUBSCRIPTION); + assertNotNull(sub); + + topic.checkCompaction(); + + Field currentCompaction = PersistentTopic.class.getDeclaredField("currentCompaction"); + currentCompaction.setAccessible(true); + CompletableFuture compactionFuture = (CompletableFuture)currentCompaction.get(topic); + + compactionFuture.get(); + + ManagedCursorImpl cursor = (ManagedCursorImpl) sub.getCursor(); + + // make cursor last active time to very small to check if it will be deleted + 
Field cursorLastActiveField = ManagedCursorImpl.class.getDeclaredField("lastActive"); + cursorLastActiveField.setAccessible(true); + cursorLastActiveField.set(cursor, 0); + + // replace origin object. so we can check if subscription is deleted. + PersistentSubscription spySubscription = Mockito.spy(sub); + topic.getSubscriptions().put(Compactor.COMPACTION_SUBSCRIPTION, spySubscription); + + // trigger inactive check. + topic.checkInactiveSubscriptions(); + + // Compaction subscription should not call delete method. + Mockito.verify(spySubscription, Mockito.never()).delete(); + + // check if the subscription exist. + assertNotNull(topic.getSubscription(Compactor.COMPACTION_SUBSCRIPTION)); + + } + /** * Verifies brokerService should not have deadlock and successfully remove topic from topicMap on topic-failure and * it should not introduce deadlock while performing it. @@ -1528,8 +1595,10 @@ public void testIsSystemTopic() { assertTrue(brokerService.isSystemTopic(TRANSACTION_COORDINATOR_ASSIGN)); assertTrue(brokerService.isSystemTopic(TRANSACTION_COORDINATOR_LOG)); - NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfig()); - NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV1 = NamespaceService + .getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV2 = NamespaceService + .getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); assertTrue(brokerService.isSystemTopic("persistent://" + heartbeatNamespaceV1.toString() + "/healthcheck")); assertTrue(brokerService.isSystemTopic(heartbeatNamespaceV2.toString() + "/healthcheck")); } @@ -1666,4 +1735,55 @@ public void testIsSystemTopicAllowAutoTopicCreationAsync() throws Exception { assertTrue(brokerService.isAllowAutoTopicCreationAsync( 
"persistent://pulsar/system/my-system-topic").get()); } + + @Test + public void testDuplicateAcknowledgement() throws Exception { + final String ns = "prop/ns-test"; + + admin.namespaces().createNamespace(ns, 2); + final String topicName = "persistent://prop/ns-test/duplicated-acknowledgement-test"; + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topicName) + .create(); + @Cleanup + Consumer consumer1 = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName("sub-1") + .acknowledgmentGroupTime(0, TimeUnit.SECONDS) + .subscriptionType(SubscriptionType.Shared) + .isAckReceiptEnabled(true) + .subscribe(); + producer.send("1".getBytes(StandardCharsets.UTF_8)); + Message message = consumer1.receive(); + consumer1.acknowledge(message); + consumer1.acknowledge(message); + assertEquals(admin.topics().getStats(topicName).getSubscriptions() + .get("sub-1").getUnackedMessages(), 0); + } + + @Test + public void testUnsubscribeNonDurableSub() throws Exception { + final String ns = "prop/ns-test"; + final String topic = ns + "/testUnsubscribeNonDurableSub"; + + admin.namespaces().createNamespace(ns, 2); + admin.topics().createPartitionedTopic(String.format("persistent://%s", topic), 1); + + pulsarClient.newProducer(Schema.STRING).topic(topic).create().close(); + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.STRING) + .topic(topic) + .subscriptionMode(SubscriptionMode.NonDurable) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionName("sub1") + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + try { + consumer.unsubscribe(); + } catch (Exception ex) { + fail("Unsubscribe failed"); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java index 469e155d409b3..a2dcf3c9c0b4d 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ClusterMigrationTest.java @@ -25,11 +25,12 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; - +import com.google.common.collect.Sets; import java.lang.reflect.Method; import java.net.URL; +import java.util.Optional; import java.util.concurrent.TimeUnit; - +import lombok.Cleanup; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; @@ -50,11 +51,7 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import com.google.common.collect.Sets; - -import lombok.Cleanup; - -@Test(groups = "broker") +@Test(groups = "cluster-migration") public class ClusterMigrationTest { private static final Logger log = LoggerFactory.getLogger(ClusterMigrationTest.class); @@ -202,9 +199,13 @@ public void setup() throws Exception { protected void cleanup() throws Exception { log.info("--- Shutting down ---"); broker1.cleanup(); + admin1.close(); broker2.cleanup(); + admin2.close(); broker3.cleanup(); + admin3.close(); broker4.cleanup(); + admin4.close(); } @BeforeMethod(alwaysRun = true) @@ -399,7 +400,7 @@ public void testClusterMigrationWithReplicationBacklog(boolean persistent, Subsc assertEquals(topic1.getReplicators().size(), 1); // stop service in the replication cluster to build replication backlog - broker3.cleanup(); + broker3.stop(); retryStrategically((test) -> broker3.getPulsarService() == null, 10, 1000); assertNull(pulsar3.getBrokerService()); @@ -477,6 +478,14 @@ protected void setup() throws Exception { super.setupWithClusterName(clusterName); } + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + this.conf.setWebServicePortTls(Optional.of(0)); + 
this.conf.setBrokerServicePortTls(Optional.of(0)); + } + + public PulsarService getPulsarService() { return pulsar; } @@ -485,9 +494,13 @@ public String getClusterName() { return configClusterName; } + public void stop() throws Exception { + stopBroker(); + } + @Override protected void cleanup() throws Exception { - stopBroker(); + internalCleanup(); } public void restart() throws Exception { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java index dbca31416bb9d..48311c57338b5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelectorTest.java @@ -21,18 +21,18 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; - -import org.apache.pulsar.broker.service.BrokerServiceException.ConsumerAssignException; -import org.apache.pulsar.client.api.Range; -import org.testng.Assert; -import org.testng.annotations.Test; - import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.pulsar.broker.service.BrokerServiceException.ConsumerAssignException; +import org.apache.pulsar.client.api.Range; +import org.testng.Assert; +import org.testng.annotations.Test; @Test(groups = "broker") public class ConsistentHashingStickyKeyConsumerSelectorTest { @@ -154,17 +154,17 @@ public void testGetConsumerKeyHashRanges() throws BrokerServiceException.Consume } Map> expectedResult = new HashMap<>(); expectedResult.put(consumers.get(0), Arrays.asList( - Range.of(0, 330121749), - Range.of(330121750, 618146114), - 
Range.of(1797637922, 1976098885))); + Range.of(119056335, 242013991), + Range.of(722195657, 1656011842), + Range.of(1707482098, 1914695766))); expectedResult.put(consumers.get(1), Arrays.asList( - Range.of(938427576, 1094135919), - Range.of(1138613629, 1342907082), - Range.of(1342907083, 1797637921))); + Range.of(0, 90164503), + Range.of(90164504, 119056334), + Range.of(382436668, 722195656))); expectedResult.put(consumers.get(2), Arrays.asList( - Range.of(618146115, 772640562), - Range.of(772640563, 938427575), - Range.of(1094135920, 1138613628))); + Range.of(242013992, 242377547), + Range.of(242377548, 382436667), + Range.of(1656011843, 1707482097))); for (Map.Entry> entry : selector.getConsumerKeyHashRanges().entrySet()) { System.out.println(entry.getValue()); Assert.assertEquals(entry.getValue(), expectedResult.get(entry.getKey())); @@ -172,4 +172,48 @@ public void testGetConsumerKeyHashRanges() throws BrokerServiceException.Consume } Assert.assertEquals(expectedResult.size(), 0); } + + // reproduces https://github.com/apache/pulsar/issues/22050 + @Test + public void shouldNotCollideWithConsumerNameEndsWithNumber() { + ConsistentHashingStickyKeyConsumerSelector selector = new ConsistentHashingStickyKeyConsumerSelector(12); + List consumerName = Arrays.asList("consumer1", "consumer11"); + List consumers = new ArrayList<>(); + for (String s : consumerName) { + Consumer consumer = mock(Consumer.class); + when(consumer.consumerName()).thenReturn(s); + selector.addConsumer(consumer); + consumers.add(consumer); + } + Map rangeToConsumer = new HashMap<>(); + for (Map.Entry> entry : selector.getConsumerKeyHashRanges().entrySet()) { + for (Range range : entry.getValue()) { + Consumer previous = rangeToConsumer.put(range, entry.getKey()); + if (previous != null) { + Assert.fail("Ranges are colliding between " + previous.consumerName() + " and " + entry.getKey() + .consumerName()); + } + } + } + } + + @Test + public void shouldRemoveConsumersFromConsumerKeyHashRanges() { 
+ ConsistentHashingStickyKeyConsumerSelector selector = new ConsistentHashingStickyKeyConsumerSelector(12); + List consumers = IntStream.range(1, 100).mapToObj(i -> "consumer" + i) + .map(consumerName -> { + Consumer consumer = mock(Consumer.class); + when(consumer.consumerName()).thenReturn(consumerName); + return consumer; + }).collect(Collectors.toList()); + + // when consumers are added + consumers.forEach(selector::addConsumer); + // then each consumer should have a range + Assert.assertEquals(selector.getConsumerKeyHashRanges().size(), consumers.size()); + // when consumers are removed + consumers.forEach(selector::removeConsumer); + // then there should be no mapping remaining + Assert.assertEquals(selector.getConsumerKeyHashRanges().size(), 0); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java index 2f128fe6270a5..33e797fcb219f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/EnableProxyProtocolTest.java @@ -19,9 +19,18 @@ package org.apache.pulsar.broker.service; import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; -import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.client.api.ClientBuilder; +import org.apache.pulsar.client.api.InjectedClientCnxClientBuilder; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ClientBuilderImpl; import org.apache.pulsar.client.impl.ClientCnx; import 
org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.policies.data.SubscriptionStats; @@ -32,10 +41,6 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.net.InetSocketAddress; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; - @Test(groups = "broker") public class EnableProxyProtocolTest extends BrokerTestBase { @@ -46,6 +51,15 @@ protected void setup() throws Exception { super.baseSetup(); } + protected PulsarClient newPulsarClient(String url, int intervalInSecs) throws PulsarClientException { + ClientBuilder clientBuilder = + PulsarClient.builder() + .serviceUrl(url) + .statsInterval(intervalInSecs, TimeUnit.SECONDS); + customizeNewPulsarClientBuilder(clientBuilder); + return createNewPulsarClient(clientBuilder); + } + @AfterClass(alwaysRun = true) @Override protected void cleanup() throws Exception { @@ -53,7 +67,7 @@ protected void cleanup() throws Exception { } @Test - public void testSimpleProduceAndConsume() throws PulsarClientException { + public void testSimpleProduceAndConsume() throws Exception { final String namespace = "prop/ns-abc"; final String topicName = "persistent://" + namespace + "/testSimpleProduceAndConsume"; final String subName = "my-subscriber-name"; @@ -76,30 +90,104 @@ public void testSimpleProduceAndConsume() throws PulsarClientException { } Assert.assertEquals(received, messages); + + // cleanup. 
+ org.apache.pulsar.broker.service.Consumer serverConsumer = pulsar.getBrokerService().getTopicReference(topicName) + .get().getSubscription(subName).getConsumers().get(0); + ((ServerCnx) serverConsumer.cnx()).close(); + consumer.close(); + producer.close(); + admin.topics().delete(topicName); } @Test - public void testProxyProtocol() throws PulsarClientException, ExecutionException, InterruptedException, PulsarAdminException { + public void testProxyProtocol() throws Exception { final String namespace = "prop/ns-abc"; final String topicName = "persistent://" + namespace + "/testProxyProtocol"; final String subName = "my-subscriber-name"; - PulsarClientImpl client = (PulsarClientImpl) pulsarClient; - CompletableFuture cnx = client.getCnxPool().getConnection(InetSocketAddress.createUnresolved("localhost", pulsar.getBrokerListenPort().get())); - // Simulate the proxy protcol message - cnx.get().ctx().channel().writeAndFlush(Unpooled.copiedBuffer("PROXY TCP4 198.51.100.22 203.0.113.7 35646 80\r\n".getBytes())); - pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) - .subscribe(); - org.apache.pulsar.broker.service.Consumer c = pulsar.getBrokerService().getTopicReference(topicName).get().getSubscription(subName).getConsumers().get(0); - Awaitility.await().untilAsserted(() -> Assert.assertTrue(c.cnx().hasHAProxyMessage())); + + // Create a client that injected the protocol implementation. + ClientBuilderImpl clientBuilder = (ClientBuilderImpl) PulsarClient.builder().serviceUrl(lookupUrl.toString()); + PulsarClientImpl protocolClient = InjectedClientCnxClientBuilder.create(clientBuilder, + (conf, eventLoopGroup) -> new ClientCnx(conf, eventLoopGroup) { + public void channelActive(ChannelHandlerContext ctx) throws Exception { + byte[] bs = "PROXY TCP4 198.51.100.22 203.0.113.7 35646 80\r\n".getBytes(); + ctx.writeAndFlush(Unpooled.copiedBuffer(bs)); + super.channelActive(ctx); + } + }); + + // Verify the addr can be handled correctly. 
+ testPubAndSub(topicName, subName, "198.51.100.22:35646", protocolClient); + + // cleanup. + admin.topics().delete(topicName); + } + + @Test(timeOut = 10000) + public void testPubSubWhenSlowNetwork() throws Exception { + final String namespace = "prop/ns-abc"; + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + namespace + "/tp"); + final String subName = "my-subscriber-name"; + + // Create a client that injected the protocol implementation. + ClientBuilderImpl clientBuilder = (ClientBuilderImpl) PulsarClient.builder().serviceUrl(lookupUrl.toString()); + PulsarClientImpl protocolClient = InjectedClientCnxClientBuilder.create(clientBuilder, + (conf, eventLoopGroup) -> new ClientCnx(conf, eventLoopGroup) { + public void channelActive(ChannelHandlerContext ctx) throws Exception { + Thread task = new Thread(() -> { + try { + byte[] bs1 = "PROXY".getBytes(); + byte[] bs2 = " TCP4 198.51.100.22 203.0.113.7 35646 80\r\n".getBytes(); + ctx.writeAndFlush(Unpooled.copiedBuffer(bs1)); + Thread.sleep(100); + ctx.writeAndFlush(Unpooled.copiedBuffer(bs2)); + super.channelActive(ctx); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + task.start(); + } + }); + + // Verify the addr can be handled correctly. + testPubAndSub(topicName, subName, "198.51.100.22:35646", protocolClient); + + // cleanup. 
+ admin.topics().delete(topicName); + } + + private void testPubAndSub(String topicName, String subName, String expectedHostAndPort, + PulsarClientImpl pulsarClient) throws Exception { + // Verify: subscribe + org.apache.pulsar.client.api.Consumer clientConsumer = pulsarClient.newConsumer(Schema.STRING).topic(topicName) + .subscriptionName(subName).subscribe(); + org.apache.pulsar.broker.service.Consumer serverConsumer = pulsar.getBrokerService() + .getTopicReference(topicName).get().getSubscription(subName).getConsumers().get(0); + Awaitility.await().untilAsserted(() -> Assert.assertTrue(serverConsumer.cnx().hasHAProxyMessage())); TopicStats topicStats = admin.topics().getStats(topicName); Assert.assertEquals(topicStats.getSubscriptions().size(), 1); SubscriptionStats subscriptionStats = topicStats.getSubscriptions().get(subName); Assert.assertEquals(subscriptionStats.getConsumers().size(), 1); - Assert.assertEquals(subscriptionStats.getConsumers().get(0).getAddress(), "198.51.100.22:35646"); + Assert.assertEquals(subscriptionStats.getConsumers().get(0).getAddress(), expectedHostAndPort); + + // Verify: producer register. + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName).create(); + TopicStats topicStats2 = admin.topics().getStats(topicName); + Assert.assertEquals(topicStats2.getPublishers().size(), 1); + Assert.assertEquals(topicStats2.getPublishers().get(0).getAddress(), expectedHostAndPort); + + // Verify: Pub & Sub + producer.send("1"); + Message msg = clientConsumer.receive(2, TimeUnit.SECONDS); + Assert.assertNotNull(msg); + Assert.assertEquals(msg.getValue(), "1"); + clientConsumer.acknowledge(msg); - pulsarClient.newProducer().topic(topicName).create(); - topicStats = admin.topics().getStats(topicName); - Assert.assertEquals(topicStats.getPublishers().size(), 1); - Assert.assertEquals(topicStats.getPublishers().get(0).getAddress(), "198.51.100.22:35646"); + // cleanup. 
+ ((ServerCnx) serverConsumer.cnx()).close(); + producer.close(); + clientConsumer.close(); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java index 0ce9a33283bfa..1549ba8d81c09 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java @@ -598,10 +598,12 @@ public void testHealthTopicInactiveNotClean() throws Exception { conf.setBrokerDeleteInactiveTopicsFrequencySeconds(1); super.baseSetup(); // init topic - NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV1 = NamespaceService + .getHeartbeatNamespace(pulsar.getBrokerId(), pulsar.getConfig()); final String healthCheckTopicV1 = "persistent://" + heartbeatNamespaceV1 + "/healthcheck"; - NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV2 = NamespaceService + .getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); final String healthCheckTopicV2 = "persistent://" + heartbeatNamespaceV2 + "/healthcheck"; admin.brokers().healthcheck(TopicVersion.V1); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTest.java index e068cd55e9055..5d748a9689e2f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/OneWayReplicatorTest.java @@ -21,16 +21,22 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static 
org.testng.Assert.assertTrue; +import java.lang.reflect.Field; import java.util.Optional; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.broker.service.persistent.PersistentReplicator; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ProducerImpl; import org.apache.pulsar.common.policies.data.TopicStats; import org.awaitility.Awaitility; +import org.mockito.Mockito; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; @@ -50,6 +56,29 @@ public void cleanup() throws Exception { super.cleanup(); } + private void waitReplicatorStarted(String topicName) { + Awaitility.await().untilAsserted(() -> { + Optional topicOptional2 = pulsar2.getBrokerService().getTopic(topicName, false).get(); + assertTrue(topicOptional2.isPresent()); + PersistentTopic persistentTopic2 = (PersistentTopic) topicOptional2.get(); + assertFalse(persistentTopic2.getProducers().isEmpty()); + }); + } + + /** + * Override "AbstractReplicator.producer" by {@param producer} and return the original value. 
+ */ + private ProducerImpl overrideProducerForReplicator(AbstractReplicator replicator, ProducerImpl newProducer) + throws Exception { + Field producerField = AbstractReplicator.class.getDeclaredField("producer"); + producerField.setAccessible(true); + ProducerImpl originalValue = (ProducerImpl) producerField.get(replicator); + synchronized (replicator) { + producerField.set(replicator, newProducer); + } + return originalValue; + } + @Test public void testReplicatorProducerStatInTopic() throws Exception { final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); @@ -85,18 +114,13 @@ public void testReplicatorProducerStatInTopic() throws Exception { public void testCreateRemoteConsumerFirst() throws Exception { final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); Producer producer1 = client1.newProducer(Schema.STRING).topic(topicName).create(); - // Wait for replicator started. - Awaitility.await().untilAsserted(() -> { - Optional topicOptional2 = pulsar2.getBrokerService().getTopic(topicName, false).get(); - assertTrue(topicOptional2.isPresent()); - PersistentTopic persistentTopic2 = (PersistentTopic) topicOptional2.get(); - assertFalse(persistentTopic2.getProducers().isEmpty()); - }); + // The topic in cluster2 has a replicator created producer(schema Auto_Produce), but does not have any schema。 // Verify: the consumer of this cluster2 can create successfully. Consumer consumer2 = client2.newConsumer(Schema.STRING).topic(topicName).subscriptionName("s1") .subscribe();; - + // Wait for replicator started. + waitReplicatorStarted(topicName); // cleanup. 
producer1.close(); consumer2.close(); @@ -105,4 +129,34 @@ public void testCreateRemoteConsumerFirst() throws Exception { admin2.topics().delete(topicName); }); } + + @Test + public void testTopicCloseWhenInternalProducerCloseErrorOnce() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://" + defaultNamespace + "/tp_"); + admin1.topics().createNonPartitionedTopic(topicName); + // Wait for replicator started. + waitReplicatorStarted(topicName); + PersistentTopic persistentTopic = + (PersistentTopic) pulsar1.getBrokerService().getTopic(topicName, false).join().get(); + PersistentReplicator replicator = + (PersistentReplicator) persistentTopic.getReplicators().values().iterator().next(); + // Mock an error when calling "replicator.disconnect()" + ProducerImpl mockProducer = Mockito.mock(ProducerImpl.class); + Mockito.when(mockProducer.closeAsync()).thenReturn(CompletableFuture.failedFuture(new Exception("mocked ex"))); + ProducerImpl originalProducer = overrideProducerForReplicator(replicator, mockProducer); + // Verify: since the "replicator.producer.closeAsync()" will retry after it failed, the topic unload should be + // successful. + admin1.topics().unload(topicName); + // Verify: After "replicator.producer.closeAsync()" retry again, the "replicator.producer" will be closed + // successful. + overrideProducerForReplicator(replicator, originalProducer); + Awaitility.await().untilAsserted(() -> { + Assert.assertFalse(replicator.isConnected()); + }); + // cleanup. 
+ cleanupTopics(() -> { + admin1.topics().delete(topicName); + admin2.topics().delete(topicName); + }); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java index f0e2e6eafcdfb..ace552a55a72a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentMessageFinderTest.java @@ -81,8 +81,11 @@ public class PersistentMessageFinderTest extends MockedBookKeeperTestCase { public static byte[] createMessageWrittenToLedger(String msg) { + return createMessageWrittenToLedger(msg, System.currentTimeMillis()); + } + public static byte[] createMessageWrittenToLedger(String msg, long messageTimestamp) { MessageMetadata messageMetadata = new MessageMetadata() - .setPublishTime(System.currentTimeMillis()) + .setPublishTime(messageTimestamp) .setProducerName("createMessageWrittenToLedger") .setSequenceId(1); ByteBuf data = UnpooledByteBufAllocator.DEFAULT.heapBuffer().writeBytes(msg.getBytes()); @@ -437,6 +440,29 @@ void testMessageExpiryWithTimestampNonRecoverableException() throws Exception { } + @Test + public void testIncorrectClientClock() throws Exception { + final String ledgerAndCursorName = "testIncorrectClientClock"; + int maxTTLSeconds = 1; + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMaxEntriesPerLedger(1); + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open(ledgerAndCursorName, config); + ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor(ledgerAndCursorName); + // set client clock to 10 days later + long incorrectPublishTimestamp = System.currentTimeMillis() + TimeUnit.DAYS.toMillis(10); + for (int i = 0; i < 10; i++) { + ledger.addEntry(createMessageWrittenToLedger("msg" + i, incorrectPublishTimestamp)); + } + 
assertEquals(ledger.getLedgersInfoAsList().size(), 10); + PersistentTopic mock = mock(PersistentTopic.class); + when(mock.getName()).thenReturn("topicname"); + when(mock.getLastPosition()).thenReturn(PositionImpl.EARLIEST); + PersistentMessageExpiryMonitor monitor = new PersistentMessageExpiryMonitor(mock, c1.getName(), c1, null); + Thread.sleep(TimeUnit.SECONDS.toMillis(maxTTLSeconds)); + monitor.expireMessages(maxTTLSeconds); + assertEquals(c1.getNumberOfEntriesInBacklog(true), 0); + } + @Test void testMessageExpiryWithPosition() throws Exception { final String ledgerAndCursorName = "testPersistentMessageExpiryWithPositionNonRecoverableLedgers"; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicInitializeDelayTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicInitializeDelayTest.java new file mode 100644 index 0000000000000..ab8d4dbe5cc01 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicInitializeDelayTest.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.ManagedLedger; +import org.apache.pulsar.broker.service.nonpersistent.NonPersistentTopic; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +@Test(groups = "broker") +@Slf4j +public class PersistentTopicInitializeDelayTest extends BrokerTestBase { + + @BeforeMethod + @Override + protected void setup() throws Exception { + conf.setTopicFactoryClassName(MyTopicFactory.class.getName()); + conf.setAllowAutoTopicCreation(true); + conf.setManagedLedgerMaxEntriesPerLedger(1); + conf.setBrokerDeleteInactiveTopicsEnabled(false); + conf.setTransactionCoordinatorEnabled(false); + conf.setTopicLoadTimeoutSeconds(30); + super.baseSetup(); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test(timeOut = 30 * 1000) + public void testTopicInitializeDelay() throws Exception { + admin.tenants().createTenant("public", TenantInfo.builder().allowedClusters(Set.of(configClusterName)).build()); + String namespace = "public/initialize-delay"; + admin.namespaces().createNamespace(namespace); + final String topicName = "persistent://" + namespace + "/testTopicInitializeDelay"; + 
admin.topics().createNonPartitionedTopic(topicName); + + admin.topicPolicies().setMaxConsumers(topicName, 10); + Awaitility.await().untilAsserted(() -> assertEquals(admin.topicPolicies().getMaxConsumers(topicName), 10)); + admin.topics().unload(topicName); + CompletableFuture> optionalFuture = pulsar.getBrokerService().getTopic(topicName, true); + + Optional topic = optionalFuture.get(15, TimeUnit.SECONDS); + assertTrue(topic.isPresent()); + } + + public static class MyTopicFactory implements TopicFactory { + @Override + public T create(String topic, ManagedLedger ledger, BrokerService brokerService, + Class topicClazz) { + try { + if (topicClazz == NonPersistentTopic.class) { + return (T) new NonPersistentTopic(topic, brokerService); + } else { + return (T) new MyPersistentTopic(topic, ledger, brokerService); + } + } catch (Exception e) { + throw new IllegalStateException(e); + } + } + + @Override + public void close() throws IOException { + // No-op + } + } + + public static class MyPersistentTopic extends PersistentTopic { + + private static AtomicInteger checkReplicationInvocationCount = new AtomicInteger(0); + + public MyPersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerService) { + super(topic, ledger, brokerService); + SystemTopicBasedTopicPoliciesService topicPoliciesService = + (SystemTopicBasedTopicPoliciesService) brokerService.getPulsar().getTopicPoliciesService(); + if (topicPoliciesService.getListeners().containsKey(TopicName.get(topic)) ) { + this.onUpdate(brokerService.getPulsar().getTopicPoliciesService().getTopicPoliciesIfExists(TopicName.get(topic))); + } + } + + protected void updateTopicPolicyByNamespacePolicy(Policies namespacePolicies) { + try { + Thread.sleep(10 * 1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + super.updateTopicPolicyByNamespacePolicy(namespacePolicies); + } + + public CompletableFuture checkReplication() { + if 
(TopicName.get(topic).getLocalName().equalsIgnoreCase("testTopicInitializeDelay")) { + checkReplicationInvocationCount.incrementAndGet(); + log.info("checkReplication, count = {}", checkReplicationInvocationCount.get()); + List configuredClusters = topicPolicies.getReplicationClusters().get(); + if (!(configuredClusters.size() == 1 && configuredClusters.contains(brokerService.pulsar().getConfiguration().getClusterName()))) { + try { + // this will cause the get topic timeout. + Thread.sleep(8 * 1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + throw new RuntimeException("checkReplication error"); + } + } + return super.checkReplication(); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index 078208f7e449c..71310fef8102a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -105,6 +105,7 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.ProducerBuilderImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ProducerConfigurationData; @@ -1713,6 +1714,8 @@ public void testAtomicReplicationRemoval() throws Exception { ManagedCursor cursor = mock(ManagedCursorImpl.class); doReturn(remoteCluster).when(cursor).getName(); PulsarClientImpl pulsarClientMock = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClientMock.getCnxPool()).thenReturn(connectionPool); when(pulsarClientMock.newProducer(any())).thenAnswer( invocation -> { ProducerBuilderImpl producerBuilder = 
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java index c22ed41fc1533..b13f150387bee 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PrecisTopicPublishRateThrottleTest.java @@ -18,16 +18,25 @@ */ package org.apache.pulsar.broker.service; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; +import java.util.function.Function; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.common.policies.data.PublishRate; +import org.apache.pulsar.common.util.RateLimiter; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.Test; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - @Test(groups = "broker") +@Slf4j public class PrecisTopicPublishRateThrottleTest extends BrokerTestBase{ @Override @@ -180,4 +189,64 @@ public void testBrokerLevelPublishRateDynamicUpdate() throws Exception{ producer.close(); super.internalCleanup(); } + + @Test + public void testWithConcurrentUpdate() throws Exception { + PublishRate publishRate = new PublishRate(-1,10); + conf.setPreciseTopicPublishRateLimiterEnable(true); + conf.setMaxPendingPublishRequestsPerConnection(1000); + super.baseSetup(); + admin.namespaces().setPublishRate("prop/ns-abc", publishRate); + final String topic = "persistent://prop/ns-abc/testWithConcurrentUpdate"; + @Cleanup + org.apache.pulsar.client.api.Producer producer = 
pulsarClient.newProducer() + .topic(topic) + .producerName("producer-name") + .create(); + + AbstractTopic topicRef = (AbstractTopic) pulsar.getBrokerService().getTopicReference(topic).get(); + Assert.assertNotNull(topicRef); + + PublishRateLimiter topicPublishRateLimiter = Mockito.spy(topicRef.getTopicPublishRateLimiter()); + + AtomicBoolean blocking = new AtomicBoolean(false); + BiFunction, Long, Boolean> blockFunc = (function, acquirePermit) -> { + blocking.set(true); + while (blocking.get()) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + return function.apply(acquirePermit); + }; + + Mockito.doAnswer(invocation -> { + log.info("tryAcquire: {}, {}", invocation.getArgument(0), invocation.getArgument(1)); + RateLimiter publishRateLimiterOnMessage = + (RateLimiter) FieldUtils.readDeclaredField(topicPublishRateLimiter, + "topicPublishRateLimiterOnMessage", true); + RateLimiter publishRateLimiterOnByte = + (RateLimiter) FieldUtils.readDeclaredField(topicPublishRateLimiter, + "topicPublishRateLimiterOnByte", true); + int numbers = invocation.getArgument(0); + long bytes = invocation.getArgument(1); + return (publishRateLimiterOnMessage == null || publishRateLimiterOnMessage.tryAcquire(numbers)) + && (publishRateLimiterOnByte == null || blockFunc.apply(publishRateLimiterOnByte::tryAcquire, bytes)); + }).when(topicPublishRateLimiter).tryAcquire(Mockito.anyInt(), Mockito.anyLong()); + + FieldUtils.writeField(topicRef, "topicPublishRateLimiter", topicPublishRateLimiter, true); + + CompletableFuture sendFuture = producer.sendAsync(new byte[10]); + + Awaitility.await().untilAsserted(() -> Assert.assertTrue(blocking.get())); + publishRate.publishThrottlingRateInByte = 20; + admin.namespaces().setPublishRate("prop/ns-abc", publishRate); + blocking.set(false); + + sendFuture.join(); + + super.internalCleanup(); + } } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java index 2816a973c92da..4cc3a9ada7d04 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorSubscriptionTest.java @@ -32,6 +32,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -41,6 +42,8 @@ import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.persistent.ReplicatedSubscriptionsController; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; @@ -49,14 +52,20 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.common.policies.data.PartitionedTopicStats; +import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import 
org.testng.annotations.DataProvider; import org.testng.annotations.Test; /** @@ -621,6 +630,27 @@ public void testReplicatedSubscriptionRestApi2() throws Exception { String.format("numReceivedMessages2 (%d) should be less than %d", numReceivedMessages2, numMessages)); } + @Test(timeOut = 30000) + public void testReplicatedSubscriptionRestApi3() throws Exception { + final String namespace = BrokerTestUtil.newUniqueName("geo/replicatedsubscription"); + final String topicName = "persistent://" + namespace + "/topic-rest-api3"; + final String subName = "sub"; + admin4.tenants().createTenant("geo", + new TenantInfoImpl(Sets.newHashSet("appid1", "appid4"), Sets.newHashSet(cluster1, cluster4))); + admin4.namespaces().createNamespace(namespace); + admin4.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet(cluster1, cluster4)); + admin4.topics().createPartitionedTopic(topicName, 2); + + @Cleanup + final PulsarClient client4 = PulsarClient.builder().serviceUrl(url4.toString()) + .statsInterval(0, TimeUnit.SECONDS).build(); + + Consumer consumer4 = client4.newConsumer().topic(topicName).subscriptionName(subName).subscribe(); + Assert.expectThrows(PulsarAdminException.class, () -> + admin4.topics().setReplicatedSubscriptionStatus(topicName, subName, true)); + consumer4.close(); + } + /** * Tests replicated subscriptions when replicator producer is closed */ @@ -704,6 +734,274 @@ public void testReplicatedSubscriptionWhenReplicatorProducerIsClosed() throws Ex Awaitility.await().untilAsserted(() -> assertNotNull(topic2.getSubscription(subscriptionName))); } + @DataProvider(name = "isTopicPolicyEnabled") + private Object[][] isTopicPolicyEnabled() { + // Todo: fix replication can not be enabled at topic level. + return new Object[][] { { Boolean.FALSE } }; + } + + /** + * Test the replication subscription can work normal in the following cases: + *

+ * 1. Do not write data into the original topic when the topic does not configure a remote cluster. {topic1} + * 1. Publish message to the topic and then wait a moment, + * the backlog will not increase after publishing completely. + * 2. Acknowledge the messages, the last confirm entry does not change. + * 2. Snapshot and mark will be written after topic configure a remote cluster. {topic2} + * 1. publish message to topic. After publishing completely, the backlog of the topic keep increase. + * 2. Wait the snapshot complete, the backlog stop changing. + * 3. Publish messages to wait another snapshot complete. + * 4. Ack messages to move the mark delete position after the position record in the first snapshot. + * 5. Check new entry (a mark) appending to the original topic. + * 3. Stopping writing snapshot and mark after remove the remote cluster of the topic. {topic2} + * similar to step 1. + *

+ */ + @Test(dataProvider = "isTopicPolicyEnabled") + public void testWriteMarkerTaskOfReplicateSubscriptions(boolean isTopicPolicyEnabled) throws Exception { + // 1. Prepare resource and use proper configuration. + String namespace = BrokerTestUtil.newUniqueName("pulsar/testReplicateSubBackLog"); + String topic1 = "persistent://" + namespace + "/replication-enable"; + String topic2 = "persistent://" + namespace + "/replication-disable"; + String subName = "sub"; + + admin1.namespaces().createNamespace(namespace); + pulsar1.getConfiguration().setTopicLevelPoliciesEnabled(isTopicPolicyEnabled); + pulsar1.getConfiguration().setReplicationPolicyCheckDurationSeconds(1); + pulsar1.getConfiguration().setReplicatedSubscriptionsSnapshotFrequencyMillis(1000); + // 2. Build Producer and Consumer. + @Cleanup + PulsarClient client1 = PulsarClient.builder().serviceUrl(url1.toString()) + .statsInterval(0, TimeUnit.SECONDS) + .build(); + @Cleanup + Consumer consumer1 = client1.newConsumer() + .topic(topic1) + .subscriptionName(subName) + .ackTimeout(5, TimeUnit.SECONDS) + .subscriptionType(SubscriptionType.Shared) + .replicateSubscriptionState(true) + .subscribe(); + @Cleanup + Producer producer1 = client1.newProducer() + .topic(topic1) + .create(); + // 3. Test replication subscription work as expected. + // Test case 1: disable replication, backlog will not increase. + testReplicatedSubscriptionWhenDisableReplication(producer1, consumer1, topic1); + + // Test case 2: enable replication, mark and snapshot work as expected. 
+ if (isTopicPolicyEnabled) { + admin1.topics().createNonPartitionedTopic(topic2); + admin1.topics().setReplicationClusters(topic2, List.of("r1", "r2")); + } else { + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1", "r2")); + } + @Cleanup + Consumer consumer2 = client1.newConsumer() + .topic(topic2) + .subscriptionName(subName) + .ackTimeout(5, TimeUnit.SECONDS) + .subscriptionType(SubscriptionType.Shared) + .replicateSubscriptionState(true) + .subscribe(); + @Cleanup + Producer producer2 = client1.newProducer() + .topic(topic2) + .create(); + testReplicatedSubscriptionWhenEnableReplication(producer2, consumer2, topic2); + + // Test case 3: enable replication, mark and snapshot work as expected. + if (isTopicPolicyEnabled) { + admin1.topics().setReplicationClusters(topic2, List.of("r1")); + } else { + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1")); + } + testReplicatedSubscriptionWhenDisableReplication(producer2, consumer2, topic2); + // 4. Clear resource. 
+ pulsar1.getConfiguration().setForceDeleteNamespaceAllowed(true); + admin1.namespaces().deleteNamespace(namespace, true); + pulsar1.getConfiguration().setForceDeleteNamespaceAllowed(false); + } + + @Test + public void testReplicatedSubscriptionWithCompaction() throws Exception { + final String namespace = BrokerTestUtil.newUniqueName("pulsar/replicatedsubscription"); + final String topicName = "persistent://" + namespace + "/testReplicatedSubscriptionWithCompaction"; + final String subName = "sub"; + + admin1.namespaces().createNamespace(namespace); + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet("r1", "r2")); + admin1.topics().createNonPartitionedTopic(topicName); + admin1.topicPolicies().setCompactionThreshold(topicName, 100 * 1024 * 1024L); + + @Cleanup final PulsarClient client = PulsarClient.builder().serviceUrl(url1.toString()) + .statsInterval(0, TimeUnit.SECONDS).build(); + + Producer producer = client.newProducer(Schema.STRING).topic(topicName).create(); + producer.newMessage().key("K1").value("V1").send(); + producer.newMessage().key("K1").value("V2").send(); + producer.close(); + + createReplicatedSubscription(client, topicName, subName, true); + Awaitility.await().untilAsserted(() -> { + Map status = admin1.topics().getReplicatedSubscriptionStatus(topicName, subName); + assertTrue(status.get(topicName)); + }); + + Awaitility.await().untilAsserted(() -> { + PersistentTopic t1 = (PersistentTopic) pulsar1.getBrokerService() + .getTopic(topicName, false).get().get(); + ReplicatedSubscriptionsController rsc1 = t1.getReplicatedSubscriptionController().get(); + Assert.assertTrue(rsc1.getLastCompletedSnapshotId().isPresent()); + assertEquals(t1.getPendingWriteOps().get(), 0L); + }); + + admin1.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin1.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Consumer consumer = 
client.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName("sub2") + .subscriptionType(SubscriptionType.Exclusive) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .readCompacted(true) + .subscribe(); + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + + result.add(receive.getValue()); + } + + Assert.assertEquals(result, List.of("V2")); + } + + /** + * Disable replication subscription. + * Test scheduled task case. + * 1. Send three messages |1:0|1:1|1:2|. + * 2. Get topic backlog, as backlog1. + * 3. Wait a moment. + * 4. Get the topic backlog again, the backlog will not increase. + * Test acknowledge messages case. + * 1. Get the last confirm entry, as LAC1. + * 2. Acknowledge these messages |1:0|1:1|. + * 3. wait a moment. + * 4. Get the last confirm entry, as LAC2. LAC1 is equal to LAC2. + * Clear environment. + * 1. Ack all the retained messages. |1:2| + * 2. Wait for the backlog to return to zero. + */ + private void testReplicatedSubscriptionWhenDisableReplication(Producer producer, Consumer consumer, + String topic) throws Exception { + final int messageSum = 3; + // Test scheduled task case. + for (int i = 0; i < messageSum; i++) { + producer.newMessage().send(); + } + long backlog1 = admin1.topics().getStats(topic, false).getBacklogSize(); + Thread.sleep(3000); + long backlog2 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertEquals(backlog1, backlog2); + // Test acknowledge messages case. + String lastConfirmEntry1 = admin1.topics().getInternalStats(topic).lastConfirmedEntry; + for (int i = 0; i < messageSum - 1; i++) { + consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + } + Awaitility.await().untilAsserted(() -> { + String lastConfirmEntry2 = admin1.topics().getInternalStats(topic).lastConfirmedEntry; + assertEquals(lastConfirmEntry1, lastConfirmEntry2); + }); + // Clear environment. 
+ consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + Awaitility.await().untilAsserted(() -> { + long backlog4 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertEquals(backlog4, 0); + }); + } + + /** + * Enable replication subscription. + * Test scheduled task case. + * 1. Wait replicator connected. + * 2. Send three messages |1:0|1:1|1:2|. + * 3. Get topic backlog, as backlog1. + * 4. Wait a moment. + * 5. Get the topic backlog again, as backlog2. The backlog2 is bigger than backlog1. |1:0|1:1|1:2|mark|. + * 6. Wait the snapshot complete. + * Test acknowledge messages case. + * 1. Write messages and wait another snapshot complete. |1:0|1:1|1:2|mark|1:3|1:4|1:5|mark| + * 2. Ack message |1:0|1:1|1:2|1:3|1:4|. + * 3. Get last confirm entry, as LAC1. + * 2. Wait a moment. + * 3. Get Last confirm entry, as LAC2. LAC2 different to LAC1. |1:5|mark|mark| + * Clear environment. + * 1. Ack all the retained message |1:5|. + * 2. Wait for the backlog to return to zero. + */ + private void testReplicatedSubscriptionWhenEnableReplication(Producer producer, Consumer consumer, + String topic) throws Exception { + final int messageSum = 3; + Awaitility.await().untilAsserted(() -> { + List keys = pulsar1.getBrokerService() + .getTopic(topic, false).get().get() + .getReplicators().keys(); + assertEquals(keys.size(), 1); + assertTrue(pulsar1.getBrokerService() + .getTopic(topic, false).get().get() + .getReplicators().get(keys.get(0)).isConnected()); + }); + // Test scheduled task case. + sendMessageAndWaitSnapshotComplete(producer, topic, messageSum); + // Test acknowledge messages case. + // After snapshot write completely, acknowledging message to move the mark delete position + // after the position recorded in the snapshot will trigger to write a new marker. 
+ sendMessageAndWaitSnapshotComplete(producer, topic, messageSum); + String lastConfirmedEntry3 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + for (int i = 0; i < messageSum * 2 - 1; i++) { + consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + } + Awaitility.await().untilAsserted(() -> { + String lastConfirmedEntry4 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + assertNotEquals(lastConfirmedEntry3, lastConfirmedEntry4); + }); + // Clear environment. + consumer.acknowledge(consumer.receive(5, TimeUnit.SECONDS)); + Awaitility.await().untilAsserted(() -> { + long backlog4 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertEquals(backlog4, 0); + }); + } + + private void sendMessageAndWaitSnapshotComplete(Producer producer, String topic, + int messageSum) throws Exception { + for (int i = 0; i < messageSum; i++) { + producer.newMessage().send(); + } + long backlog1 = admin1.topics().getStats(topic, false).getBacklogSize(); + Awaitility.await().untilAsserted(() -> { + long backlog2 = admin1.topics().getStats(topic, false).getBacklogSize(); + assertTrue(backlog2 > backlog1); + }); + // Wait snapshot write completely, stop writing marker into topic. 
+ Awaitility.await().untilAsserted(() -> { + String lastConfirmedEntry1 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + PersistentTopicInternalStats persistentTopicInternalStats = admin1.topics().getInternalStats(topic, false); + Thread.sleep(1000); + String lastConfirmedEntry2 = admin1.topics().getInternalStats(topic, false).lastConfirmedEntry; + assertEquals(lastConfirmedEntry1, lastConfirmedEntry2); + }); + } + void publishMessages(Producer producer, int startIndex, int numMessages, Set sentMessages) throws PulsarClientException { for (int i = startIndex; i < startIndex + numMessages; i++) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java index f710c8541d1b5..159d49ca2e7cc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java @@ -83,6 +83,7 @@ import org.apache.pulsar.client.api.RawMessage; import org.apache.pulsar.client.api.RawReader; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.TypedMessageBuilder; import org.apache.pulsar.client.api.schema.GenericRecord; @@ -238,16 +239,14 @@ public void activeBrokerParse() throws Exception { pulsar1.getConfiguration().setAuthorizationEnabled(true); //init clusterData - String cluster2ServiceUrls = String.format("%s,localhost:1234,localhost:5678,localhost:5677,localhost:5676", - pulsar2.getWebServiceAddress()); - ClusterData cluster2Data = ClusterData.builder().serviceUrl(cluster2ServiceUrls).build(); + ClusterData cluster2Data = ClusterData.builder().serviceUrl(pulsar2.getWebServiceAddress()).build(); String cluster2 = "activeCLuster2"; admin2.clusters().createCluster(cluster2, cluster2Data); 
Awaitility.await().until(() -> admin2.clusters().getCluster(cluster2) != null); List list = admin1.brokers().getActiveBrokers(cluster2); - assertEquals(list.get(0), url2.toString().replace("http://", "")); + assertEquals(list.get(0), pulsar2.getBrokerId()); //restore configuration pulsar1.getConfiguration().setAuthorizationEnabled(false); } @@ -1803,4 +1802,74 @@ public void testReplicatorProducerNotExceed() throws Exception { Assert.assertThrows(PulsarClientException.ProducerBusyException.class, () -> new MessageProducer(url2, dest2)); } + + @Test + public void testReplicatorWithTTL() throws Exception { + log.info("--- Starting ReplicatorTest::testReplicatorWithTTL ---"); + + final String cluster1 = pulsar1.getConfig().getClusterName(); + final String cluster2 = pulsar2.getConfig().getClusterName(); + final String namespace = BrokerTestUtil.newUniqueName("pulsar/ns"); + final TopicName topic = TopicName + .get(BrokerTestUtil.newUniqueName("persistent://" + namespace + "/testReplicatorWithTTL")); + admin1.namespaces().createNamespace(namespace, Sets.newHashSet(cluster1, cluster2)); + admin1.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet(cluster1, cluster2)); + admin1.topics().createNonPartitionedTopic(topic.toString()); + admin1.topicPolicies().setMessageTTL(topic.toString(), 1); + + @Cleanup + PulsarClient client1 = PulsarClient.builder().serviceUrl(url1.toString()).statsInterval(0, TimeUnit.SECONDS) + .build(); + + @Cleanup + Producer persistentProducer1 = client1.newProducer().topic(topic.toString()).create(); + persistentProducer1.send("V1".getBytes()); + + waitReplicateFinish(topic, admin1); + + PersistentTopic persistentTopic = + (PersistentTopic) pulsar1.getBrokerService().getTopicReference(topic.toString()).get(); + persistentTopic.getReplicators().forEach((cluster, replicator) -> { + PersistentReplicator persistentReplicator = (PersistentReplicator) replicator; + // Pause replicator + persistentReplicator.disconnect(); + }); + + 
persistentProducer1.send("V2".getBytes()); + persistentProducer1.send("V3".getBytes()); + + Thread.sleep(1000); + + admin1.topics().expireMessagesForAllSubscriptions(topic.toString(), 1); + + persistentTopic.getReplicators().forEach((cluster, replicator) -> { + PersistentReplicator persistentReplicator = (PersistentReplicator) replicator; + persistentReplicator.startProducer(); + }); + + waitReplicateFinish(topic, admin1); + + persistentProducer1.send("V4".getBytes()); + + waitReplicateFinish(topic, admin1); + + @Cleanup + PulsarClient client2 = PulsarClient.builder().serviceUrl(url2.toString()).statsInterval(0, TimeUnit.SECONDS) + .build(); + + @Cleanup + Consumer consumer = client2.newConsumer().topic(topic.toString()) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscriptionName("sub").subscribe(); + + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + result.add(new String(receive.getValue())); + } + + assertEquals(result, Lists.newArrayList("V1", "V2", "V3", "V4")); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java index b83e8ac9d2dbf..beb1a3c4b9309 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTestBase.java @@ -82,6 +82,13 @@ public abstract class ReplicatorTestBase extends TestRetrySupport { PulsarAdmin admin3; LocalBookkeeperEnsemble bkEnsemble3; + URL url4; + URL urlTls4; + ServiceConfiguration config4 = new ServiceConfiguration(); + PulsarService pulsar4; + PulsarAdmin admin4; + LocalBookkeeperEnsemble bkEnsemble4; + ZookeeperServerTest globalZkS; ExecutorService executor; @@ -111,6 +118,7 @@ public abstract class ReplicatorTestBase extends TestRetrySupport { protected 
final String cluster1 = "r1"; protected final String cluster2 = "r2"; protected final String cluster3 = "r3"; + protected final String cluster4 = "r4"; // Default frequency public int getBrokerServicePurgeInactiveFrequency() { @@ -178,6 +186,21 @@ protected void setup() throws Exception { urlTls3 = new URL(pulsar3.getWebServiceAddressTls()); admin3 = PulsarAdmin.builder().serviceHttpUrl(url3.toString()).build(); + // Start region 4 + + // Start zk & bks + bkEnsemble4 = new LocalBookkeeperEnsemble(3, 0, () -> 0); + bkEnsemble4.start(); + + setConfig4DefaultValue(); + pulsar4 = new PulsarService(config4); + pulsar4.start(); + + url4 = new URL(pulsar4.getWebServiceAddress()); + urlTls4 = new URL(pulsar4.getWebServiceAddressTls()); + admin4 = PulsarAdmin.builder().serviceHttpUrl(url4.toString()).build(); + + // Provision the global namespace admin1.clusters().createCluster(cluster1, ClusterData.builder() .serviceUrl(url1.toString()) @@ -230,6 +253,23 @@ protected void setup() throws Exception { .brokerClientTlsTrustStorePassword(keyStorePassword) .brokerClientTlsTrustStoreType(keyStoreType) .build()); + admin4.clusters().createCluster(cluster4, ClusterData.builder() + .serviceUrl(url4.toString()) + .serviceUrlTls(urlTls4.toString()) + .brokerServiceUrl(pulsar4.getBrokerServiceUrl()) + .brokerServiceUrlTls(pulsar4.getBrokerServiceUrlTls()) + .brokerClientTlsEnabled(true) + .brokerClientCertificateFilePath(clientCertFilePath) + .brokerClientKeyFilePath(clientKeyFilePath) + .brokerClientTrustCertsFilePath(caCertFilePath) + .brokerClientTlsEnabledWithKeyStore(tlsWithKeyStore) + .brokerClientTlsKeyStore(clientKeyStorePath) + .brokerClientTlsKeyStorePassword(keyStorePassword) + .brokerClientTlsKeyStoreType(keyStoreType) + .brokerClientTlsTrustStore(clientTrustStorePath) + .brokerClientTlsTrustStorePassword(keyStorePassword) + .brokerClientTlsTrustStoreType(keyStoreType) + .build()); admin1.tenants().createTenant("pulsar", new TenantInfoImpl(Sets.newHashSet("appid1", 
"appid2", "appid3"), Sets.newHashSet("r1", "r2", "r3"))); @@ -257,7 +297,7 @@ protected void setup() throws Exception { } public void setConfig3DefaultValue() { - setConfigDefaults(config3, "r3", bkEnsemble3); + setConfigDefaults(config3, cluster3, bkEnsemble3); config3.setTlsEnabled(true); } @@ -269,6 +309,11 @@ public void setConfig2DefaultValue() { setConfigDefaults(config2, cluster2, bkEnsemble2); } + public void setConfig4DefaultValue() { + setConfigDefaults(config4, cluster4, bkEnsemble4); + config4.setEnableReplicatedSubscriptions(false); + } + private void setConfigDefaults(ServiceConfiguration config, String clusterName, LocalBookkeeperEnsemble bookkeeperEnsemble) { config.setClusterName(clusterName); @@ -316,6 +361,11 @@ public void resetConfig3() { setConfig3DefaultValue(); } + public void resetConfig4() { + config4 = new ServiceConfiguration(); + setConfig4DefaultValue(); + } + private int inSec(int time, TimeUnit unit) { return (int) TimeUnit.SECONDS.convert(time, unit); } @@ -332,7 +382,11 @@ protected void cleanup() throws Exception { admin1.close(); admin2.close(); admin3.close(); + admin4.close(); + if (pulsar4 != null) { + pulsar4.close(); + } if (pulsar3 != null) { pulsar3.close(); } @@ -346,11 +400,13 @@ protected void cleanup() throws Exception { bkEnsemble1.stop(); bkEnsemble2.stop(); bkEnsemble3.stop(); + bkEnsemble4.stop(); globalZkS.stop(); resetConfig1(); resetConfig2(); resetConfig3(); + resetConfig4(); } static class MessageProducer implements AutoCloseable { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java index c3bab634a42c1..8abd6dcff8de4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java @@ -45,8 +45,11 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import 
io.netty.channel.ChannelHandler; +import io.netty.channel.DefaultChannelId; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.vertx.core.impl.ConcurrentHashSet; +import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; @@ -60,10 +63,15 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import java.util.function.Supplier; +import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.CloseCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCursorCallback; @@ -74,6 +82,7 @@ import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.commons.lang3.mutable.MutableInt; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockAlwaysExpiredAuthenticationProvider; @@ -93,6 +102,7 @@ import org.apache.pulsar.broker.service.ServerCnx.State; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.utils.ClientChannelHelper; +import org.apache.pulsar.client.api.ProducerAccessMode; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.api.AuthData; import org.apache.pulsar.common.api.proto.AuthMethod; @@ -113,6 +123,7 @@ import 
org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespaceResponse; import org.apache.pulsar.common.api.proto.CommandLookupTopicResponse; import org.apache.pulsar.common.api.proto.CommandPartitionedTopicMetadataResponse; +import org.apache.pulsar.common.api.proto.CommandPing; import org.apache.pulsar.common.api.proto.CommandProducerSuccess; import org.apache.pulsar.common.api.proto.CommandSendError; import org.apache.pulsar.common.api.proto.CommandSendReceipt; @@ -135,6 +146,7 @@ import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.Commands.ChecksumType; import org.apache.pulsar.common.protocol.PulsarHandler; +import org.apache.pulsar.common.protocol.schema.EmptyVersion; import org.apache.pulsar.common.topics.TopicList; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; @@ -149,6 +161,7 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; +@Slf4j @SuppressWarnings("unchecked") @Test(groups = "broker") public class ServerCnxTest { @@ -184,10 +197,12 @@ public class ServerCnxTest { private ManagedLedger ledgerMock; private ManagedCursor cursorMock; + private ConcurrentHashSet channelsStoppedAnswerHealthCheck = new ConcurrentHashSet<>(); @BeforeMethod(alwaysRun = true) public void setup() throws Exception { + channelsStoppedAnswerHealthCheck.clear(); svcConfig = new ServiceConfiguration(); svcConfig.setBrokerShutdownTimeoutMs(0L); svcConfig.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); @@ -496,6 +511,41 @@ public void testConnectCommandWithPassingOriginalAuthData() throws Exception { channel.finish(); } + @Test(timeOut = 30000) + public void testConnectCommandWithPassingOriginalAuthDataAndSetAnonymousUserRole() throws Exception { + AuthenticationService authenticationService = mock(AuthenticationService.class); + AuthenticationProvider authenticationProvider = new MockAuthenticationProvider(); + String 
authMethodName = authenticationProvider.getAuthMethodName(); + + String anonymousUserRole = "admin"; + when(brokerService.getAuthenticationService()).thenReturn(authenticationService); + when(authenticationService.getAuthenticationProvider(authMethodName)).thenReturn(authenticationProvider); + when(authenticationService.getAnonymousUserRole()).thenReturn(Optional.of(anonymousUserRole)); + svcConfig.setAuthenticationEnabled(true); + svcConfig.setAuthenticateOriginalAuthData(true); + svcConfig.setProxyRoles(Collections.singleton("pass.proxy")); + svcConfig.setAnonymousUserRole(anonymousUserRole); + + resetChannel(); + assertTrue(channel.isActive()); + assertEquals(serverCnx.getState(), State.Start); + + // When both the proxy and the broker set the anonymousUserRole option + // the proxy will use anonymousUserRole to delegate the client's role when connecting. + ByteBuf clientCommand = Commands.newConnect(authMethodName, "pass.proxy", 1, null, + null, anonymousUserRole, null, null); + channel.writeInbound(clientCommand); + + Object response1 = getResponse(); + assertTrue(response1 instanceof CommandConnected); + assertEquals(serverCnx.getState(), State.Connected); + assertEquals(serverCnx.getAuthRole(), anonymousUserRole); + assertEquals(serverCnx.getPrincipal(), anonymousUserRole); + assertEquals(serverCnx.getOriginalPrincipal(), anonymousUserRole); + assertTrue(serverCnx.isActive()); + channel.finish(); + } + @Test(timeOut = 30000) public void testConnectCommandWithPassingOriginalPrincipal() throws Exception { AuthenticationService authenticationService = mock(AuthenticationService.class); @@ -927,6 +977,232 @@ public void testVerifyOriginalPrincipalWithAuthDataForwardedFromProxy() throws E })); } + @Test + public void testDuplicateProducer() throws Exception { + final String tName = successTopicName; + final long producerId = 1; + final MutableInt requestId = new MutableInt(1); + final MutableInt epoch = new MutableInt(1); + final Map metadata = 
Collections.emptyMap(); + final String pName = "p1"; + resetChannel(); + setChannelConnected(); + + // The producer register using the first connection. + ByteBuf cmdProducer1 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel.writeInbound(cmdProducer1); + assertTrue(getResponse() instanceof CommandProducerSuccess); + PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(tName).get(); + assertNotNull(topicRef); + assertEquals(topicRef.getProducers().size(), 1); + + // Verify the second producer will be reject due to the previous one still is active. + // Every second try once, total 10 times, all requests should fail. + ClientChannel channel2 = new ClientChannel(); + BackGroundExecutor backGroundExecutor1 = startBackgroundExecutorForEmbeddedChannel(channel); + BackGroundExecutor autoResponseForHeartBeat = autoResponseForHeartBeat(channel, clientChannelHelper); + BackGroundExecutor backGroundExecutor2 = startBackgroundExecutorForEmbeddedChannel(channel2.channel); + setChannelConnected(channel2.serverCnx); + + for (int i = 0; i < 10; i++) { + ByteBuf cmdProducer2 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel2.channel.writeInbound(cmdProducer2); + Object response2 = getResponse(channel2.channel, channel2.clientChannelHelper); + assertTrue(response2 instanceof CommandError); + assertEquals(topicRef.getProducers().size(), 1); + assertTrue(channel.isActive()); + Thread.sleep(500); + } + + // cleanup. 
+ autoResponseForHeartBeat.close(); + backGroundExecutor1.close(); + backGroundExecutor2.close(); + channel.finish(); + channel2.close(); + } + + @Test + public void testProducerChangeSocket() throws Exception { + final String tName = successTopicName; + final long producerId = 1; + final MutableInt requestId = new MutableInt(1); + final MutableInt epoch = new MutableInt(1); + final Map metadata = Collections.emptyMap(); + final String pName = "p1"; + resetChannel(); + setChannelConnected(); + + // The producer register using the first connection. + ByteBuf cmdProducer1 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel.writeInbound(cmdProducer1); + assertTrue(getResponse() instanceof CommandProducerSuccess); + PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(tName).get(); + assertNotNull(topicRef); + assertEquals(topicRef.getProducers().size(), 1); + + // Verify the second producer using a new connection will override the producer who using a stopped channel. + channelsStoppedAnswerHealthCheck.add(channel); + ClientChannel channel2 = new ClientChannel(); + BackGroundExecutor backGroundExecutor1 = startBackgroundExecutorForEmbeddedChannel(channel); + BackGroundExecutor backGroundExecutor2 = startBackgroundExecutorForEmbeddedChannel(channel2.channel); + setChannelConnected(channel2.serverCnx); + + ByteBuf cmdProducer2 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + channel2.channel.writeInbound(cmdProducer2); + Object response2 = getResponse(channel2.channel, channel2.clientChannelHelper); + assertTrue(response2 instanceof CommandProducerSuccess); + assertEquals(topicRef.getProducers().size(), 1); + + // cleanup. 
+ channelsStoppedAnswerHealthCheck.clear(); + backGroundExecutor1.close(); + backGroundExecutor2.close(); + channel.finish(); + channel2.close(); + } + + /** + * When a channel typed "EmbeddedChannel", once we call channel.execute(runnable), there is no background thread + * to run it. + * So starting a background thread to trigger the tasks in the queue. + */ + private BackGroundExecutor startBackgroundExecutorForEmbeddedChannel(final EmbeddedChannel channel) { + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + ScheduledFuture scheduledFuture = executor.scheduleWithFixedDelay(() -> { + channel.runPendingTasks(); + }, 100, 100, TimeUnit.MILLISECONDS); + return new BackGroundExecutor(executor, scheduledFuture); + } + + /** + * Auto answer `Pong` for the `Cmd-Ping`. + * Node: This will result in additional threads pop Command from the Command queue, so do not call this + * method if the channel needs to accept other Command. + */ + private BackGroundExecutor autoResponseForHeartBeat(EmbeddedChannel channel, + ClientChannelHelper clientChannelHelper) { + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + ScheduledFuture scheduledFuture = executor.scheduleWithFixedDelay(() -> { + tryPeekResponse(channel, clientChannelHelper); + }, 100, 100, TimeUnit.MILLISECONDS); + return new BackGroundExecutor(executor, scheduledFuture); + } + + @AllArgsConstructor + private static class BackGroundExecutor implements Closeable { + + private ScheduledExecutorService executor; + + private ScheduledFuture scheduledFuture; + + @Override + public void close() throws IOException { + if (scheduledFuture != null) { + scheduledFuture.cancel(true); + } + executor.shutdown(); + } + } + + private class ClientChannel implements Closeable { + private ClientChannelHelper clientChannelHelper = new ClientChannelHelper(); + private ServerCnx serverCnx = new ServerCnx(pulsar); + private EmbeddedChannel channel = new 
EmbeddedChannel(DefaultChannelId.newInstance(), + new LengthFieldBasedFrameDecoder( + 5 * 1024 * 1024, + 0, + 4, + 0, + 4), + serverCnx); + public ClientChannel() { + serverCnx.setAuthRole(""); + } + public void close(){ + if (channel != null && channel.isActive()) { + serverCnx.close(); + channel.close(); + } + } + } + + @Test + public void testHandleProducer() throws Exception { + final String tName = "persistent://public/default/test-topic"; + final long producerId = 1; + final MutableInt requestId = new MutableInt(1); + final MutableInt epoch = new MutableInt(1); + final Map metadata = Collections.emptyMap(); + final String pName = "p1"; + resetChannel(); + assertTrue(channel.isActive()); + assertEquals(serverCnx.getState(), State.Start); + + // connect. + ByteBuf cConnect = Commands.newConnect("none", "", null); + channel.writeInbound(cConnect); + assertEquals(serverCnx.getState(), State.Connected); + assertTrue(getResponse() instanceof CommandConnected); + + // There is an in-progress producer registration. + ByteBuf cProducer1 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + CompletableFuture existingFuture1 = new CompletableFuture(); + serverCnx.getProducers().put(producerId, existingFuture1); + channel.writeInbound(cProducer1); + Object response1 = getResponse(); + assertTrue(response1 instanceof CommandError); + CommandError error1 = (CommandError) response1; + assertEquals(error1.getError().toString(), ServerError.ServiceNotReady.toString()); + assertTrue(error1.getMessage().contains("already present on the connection")); + + // There is a failed registration. 
+ ByteBuf cProducer2 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + CompletableFuture existingFuture2 = new CompletableFuture(); + existingFuture2.completeExceptionally(new BrokerServiceException.ProducerBusyException("123")); + serverCnx.getProducers().put(producerId, existingFuture2); + + channel.writeInbound(cProducer2); + Object response2 = getResponse(); + assertTrue(response2 instanceof CommandError); + CommandError error2 = (CommandError) response2; + assertEquals(error2.getError().toString(), ServerError.ProducerBusy.toString()); + assertTrue(error2.getMessage().contains("already failed to register present on the connection")); + + // There is an successful registration. + ByteBuf cProducer3 = Commands.newProducer(tName, producerId, requestId.incrementAndGet(), + pName, false, metadata, null, epoch.incrementAndGet(), false, + ProducerAccessMode.Shared, Optional.empty(), false); + CompletableFuture existingFuture3 = new CompletableFuture(); + org.apache.pulsar.broker.service.Producer serviceProducer = + mock(org.apache.pulsar.broker.service.Producer.class); + when(serviceProducer.getProducerName()).thenReturn(pName); + when(serviceProducer.getSchemaVersion()).thenReturn(new EmptyVersion()); + existingFuture3.complete(serviceProducer); + serverCnx.getProducers().put(producerId, existingFuture3); + + channel.writeInbound(cProducer3); + Object response3 = getResponse(); + assertTrue(response3 instanceof CommandProducerSuccess); + CommandProducerSuccess cProducerSuccess = (CommandProducerSuccess) response3; + assertEquals(cProducerSuccess.getProducerName(), pName); + + // cleanup. + channel.finish(); + } + // This test used to be in the ServerCnxAuthorizationTest class, but it was migrated here because the mocking // in that class was too extensive. There is some overlap with this test and other tests in this class. 
The primary // role of this test is verifying that the correct role and AuthenticationDataSource are passed to the @@ -2471,6 +2747,10 @@ protected void resetChannel() throws Exception { } protected void setChannelConnected() throws Exception { + setChannelConnected(serverCnx); + } + + protected void setChannelConnected(ServerCnx serverCnx) throws Exception { Field channelState = ServerCnx.class.getDeclaredField("state"); channelState.setAccessible(true); channelState.set(serverCnx, State.Connected); @@ -2484,13 +2764,31 @@ private void setConnectionVersion(int version) throws Exception { } protected Object getResponse() throws Exception { + return getResponse(channel, clientChannelHelper); + } + + protected Object getResponse(EmbeddedChannel channel, ClientChannelHelper clientChannelHelper) throws Exception { // Wait at most for 10s to get a response final long sleepTimeMs = 10; final long iterations = TimeUnit.SECONDS.toMillis(10) / sleepTimeMs; for (int i = 0; i < iterations; i++) { if (!channel.outboundMessages().isEmpty()) { Object outObject = channel.outboundMessages().remove(); - return clientChannelHelper.getCommand(outObject); + Object cmd = clientChannelHelper.getCommand(outObject); + if (cmd instanceof CommandPing) { + if (channelsStoppedAnswerHealthCheck.contains(channel)) { + continue; + } + channel.writeAndFlush(Commands.newPong()).addListener(future -> { + if (!future.isSuccess()) { + log.warn("[{}] Forcing connection to close since cannot send a pong message.", + channel, future.cause()); + channel.close(); + } + }); + continue; + } + return cmd; } else { Thread.sleep(sleepTimeMs); } @@ -2499,6 +2797,26 @@ protected Object getResponse() throws Exception { throw new IOException("Failed to get response from socket within 10s"); } + protected Object tryPeekResponse(EmbeddedChannel channel, ClientChannelHelper clientChannelHelper) { + while (true) { + if (channel.outboundMessages().isEmpty()) { + return null; + } else { + Object outObject = 
channel.outboundMessages().peek(); + Object cmd = clientChannelHelper.getCommand(outObject); + if (cmd instanceof CommandPing) { + if (channelsStoppedAnswerHealthCheck.contains(channel)) { + continue; + } + channel.writeInbound(Commands.newPong()); + channel.outboundMessages().remove(); + continue; + } + return cmd; + } + } + } + private void setupMLAsyncCallbackMocks() { ledgerMock = mock(ManagedLedger.class); cursorMock = mock(ManagedCursor.class); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java index 31b5bcb23cd98..343a18da6d8a9 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java @@ -18,9 +18,6 @@ */ package org.apache.pulsar.broker.service; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertNotNull; @@ -36,18 +33,18 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.service.BrokerServiceException.TopicPoliciesCacheNotInitException; -import org.apache.pulsar.broker.systopic.NamespaceEventsSystemTopicFactory; import 
org.apache.pulsar.broker.systopic.SystemTopicClient; -import org.apache.pulsar.broker.systopic.TopicPoliciesSystemTopicClient; import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.BackoffBuilder; import org.apache.pulsar.common.events.PulsarEvent; @@ -56,7 +53,6 @@ import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicPolicies; -import org.apache.pulsar.common.util.FutureUtil; import org.assertj.core.api.Assertions; import org.awaitility.Awaitility; import org.mockito.Mockito; @@ -73,6 +69,8 @@ public class SystemTopicBasedTopicPoliciesServiceTest extends MockedPulsarServic private static final String NAMESPACE2 = "system-topic/namespace-2"; private static final String NAMESPACE3 = "system-topic/namespace-3"; + private static final String NAMESPACE4 = "system-topic/namespace-4"; + private static final TopicName TOPIC1 = TopicName.get("persistent", NamespaceName.get(NAMESPACE1), "topic-1"); private static final TopicName TOPIC2 = TopicName.get("persistent", NamespaceName.get(NAMESPACE1), "topic-2"); private static final TopicName TOPIC3 = TopicName.get("persistent", NamespaceName.get(NAMESPACE2), "topic-1"); @@ -141,7 +139,7 @@ public void testGetPolicy() throws ExecutionException, InterruptedException, Top // Wait for all topic policies updated. 
Awaitility.await().untilAsserted(() -> Assert.assertTrue(systemTopicBasedTopicPoliciesService - .getPoliciesCacheInit(TOPIC1.getNamespaceObject()))); + .getPoliciesCacheInit(TOPIC1.getNamespaceObject()).isDone())); // Assert broker is cache all topic policies Awaitility.await().untilAsserted(() -> @@ -304,8 +302,8 @@ private void prepareData() throws PulsarAdminException { @Test public void testGetPolicyTimeout() throws Exception { SystemTopicBasedTopicPoliciesService service = (SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService(); - Awaitility.await().untilAsserted(() -> assertTrue(service.policyCacheInitMap.get(TOPIC1.getNamespaceObject()))); - service.policyCacheInitMap.put(TOPIC1.getNamespaceObject(), false); + Awaitility.await().untilAsserted(() -> assertTrue(service.policyCacheInitMap.get(TOPIC1.getNamespaceObject()).isDone())); + service.policyCacheInitMap.put(TOPIC1.getNamespaceObject(), new CompletableFuture<>()); long start = System.currentTimeMillis(); Backoff backoff = new BackoffBuilder() .setInitialTime(500, TimeUnit.MILLISECONDS) @@ -321,28 +319,6 @@ public void testGetPolicyTimeout() throws Exception { assertTrue("actual:" + cost, cost >= 5000 - 1000); } - @Test - public void testCreatSystemTopicClientWithRetry() throws Exception { - SystemTopicBasedTopicPoliciesService service = - spy((SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService()); - Field field = SystemTopicBasedTopicPoliciesService.class - .getDeclaredField("namespaceEventsSystemTopicFactory"); - field.setAccessible(true); - NamespaceEventsSystemTopicFactory factory = spy((NamespaceEventsSystemTopicFactory) field.get(service)); - SystemTopicClient client = mock(TopicPoliciesSystemTopicClient.class); - doReturn(client).when(factory).createTopicPoliciesSystemTopicClient(any()); - field.set(service, factory); - - SystemTopicClient.Reader reader = mock(SystemTopicClient.Reader.class); - // Throw an exception first, create successfully after retrying - 
doReturn(FutureUtil.failedFuture(new PulsarClientException("test"))) - .doReturn(CompletableFuture.completedFuture(reader)).when(client).newReaderAsync(); - - SystemTopicClient.Reader reader1 = service.createSystemTopicClientWithRetry(null).get(); - - assertEquals(reader1, reader); - } - @Test public void testGetTopicPoliciesWithRetry() throws Exception { Field initMapField = SystemTopicBasedTopicPoliciesService.class.getDeclaredField("policyCacheInitMap"); @@ -457,4 +433,35 @@ public void testGetTopicPoliciesWithCleanCache() throws Exception { result.join(); } + + @Test + public void testWriterCache() throws Exception { + admin.namespaces().createNamespace(NAMESPACE4); + for (int i = 1; i <= 5; i ++) { + final String topicName = "persistent://" + NAMESPACE4 + "/testWriterCache" + i; + admin.topics().createNonPartitionedTopic(topicName); + pulsarClient.newProducer(Schema.STRING).topic(topicName).create().close(); + } + @Cleanup("shutdown") + ExecutorService executorService = Executors.newFixedThreadPool(5); + for (int i = 1; i <= 5; i ++) { + int finalI = i; + executorService.execute(() -> { + final String topicName = "persistent://" + NAMESPACE4 + "/testWriterCache" + finalI; + try { + admin.topicPolicies().setMaxConsumers(topicName, 2); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + SystemTopicBasedTopicPoliciesService service = (SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService(); + Assert.assertNotNull(service.getWriterCaches().synchronous().get(NamespaceName.get(NAMESPACE4))); + for (int i = 1; i <= 5; i ++) { + final String topicName = "persistent://" + NAMESPACE4 + "/testWriterCache" + i; + admin.topics().delete(topicName); + } + admin.namespaces().deleteNamespace(NAMESPACE4); + Assert.assertNull(service.getWriterCaches().synchronous().getIfPresent(NamespaceName.get(NAMESPACE4))); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicGCTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicGCTest.java new file mode 100644 index 0000000000000..7790940c1327f --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicGCTest.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import static org.testng.Assert.assertTrue; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import lombok.EqualsAndHashCode; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.policies.data.InactiveTopicDeleteMode; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class TopicGCTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @EqualsAndHashCode.Include + protected void doInitConf() throws Exception { + super.doInitConf(); + this.conf.setBrokerDeleteInactiveTopicsEnabled(true); + this.conf.setBrokerDeleteInactiveTopicsMode(InactiveTopicDeleteMode.delete_when_subscriptions_caught_up); + this.conf.setBrokerDeleteInactiveTopicsFrequencySeconds(10); + } + + @Test + public void testCreateConsumerAfterOnePartDeleted() throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final String partition0 = topic + "-partition-0"; + final String partition1 = topic + "-partition-1"; + final String subscription = "s1"; + admin.topics().createPartitionedTopic(topic, 2); + admin.topics().createSubscription(topic, subscription, MessageId.earliest); + + // 
create consumers and producers. + Producer producer0 = pulsarClient.newProducer(Schema.STRING).topic(partition0) + .enableBatching(false).create(); + Producer producer1 = pulsarClient.newProducer(Schema.STRING).topic(partition1) + .enableBatching(false).create(); + org.apache.pulsar.client.api.Consumer consumer1 = pulsarClient.newConsumer(Schema.STRING).topic(topic) + .subscriptionName(subscription).isAckReceiptEnabled(true).subscribe(); + + // Make consume all messages for one topic, do not consume any messages for another one. + producer0.send("1"); + producer1.send("2"); + admin.topics().skipAllMessages(partition0, subscription); + + // Wait for topic GC. + // Partition 0 will be deleted about 20s later, left 2min to avoid flaky. + producer0.close(); + consumer1.close(); + Awaitility.await().atMost(2, TimeUnit.MINUTES).untilAsserted(() -> { + CompletableFuture> tp1 = pulsar.getBrokerService().getTopic(partition0, false); + CompletableFuture> tp2 = pulsar.getBrokerService().getTopic(partition1, false); + assertTrue(tp1 == null || !tp1.get().isPresent()); + assertTrue(tp2 != null && tp2.get().isPresent()); + }); + + // Verify that the consumer subscribed with partitioned topic can be created successful. + Consumer consumerAllPartition = pulsarClient.newConsumer(Schema.STRING).topic(topic) + .subscriptionName(subscription).isAckReceiptEnabled(true).subscribe(); + Message msg = consumerAllPartition.receive(2, TimeUnit.SECONDS); + String receivedMsgValue = msg.getValue(); + log.info("received msg: {}", receivedMsgValue); + consumerAllPartition.acknowledge(msg); + + // cleanup. 
+ consumerAllPartition.close(); + producer0.close(); + producer1.close(); + admin.topics().deletePartitionedTopic(topic); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java index 0a82b2b4c3cb0..54fec3934ddbc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/BucketDelayedDeliveryTest.java @@ -20,10 +20,13 @@ import static org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.CURSOR_INTERNAL_PROPERTY_PREFIX; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import com.google.common.collect.Multimap; import java.io.ByteArrayOutputStream; import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; @@ -32,6 +35,7 @@ import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.pulsar.broker.BrokerTestUtil; @@ -40,6 +44,7 @@ import org.apache.pulsar.broker.stats.PrometheusMetricsTest; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; @@ -47,6 +52,7 @@ import org.testng.Assert; import org.testng.annotations.AfterClass; import 
org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @Test(groups = "broker") @@ -353,4 +359,121 @@ public void testDelete() throws Exception { } } } + + @DataProvider(name = "subscriptionTypes") + public Object[][] subscriptionTypes() { + return new Object[][]{ + {SubscriptionType.Shared}, + {SubscriptionType.Key_Shared}, + {SubscriptionType.Failover}, + {SubscriptionType.Exclusive}, + }; + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. + */ + @Test(dataProvider = "subscriptionTypes") + public void testDeleteTopicIfCursorPropsEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createNonPartitionedTopic(topic); + // create a subscription without props. + admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + ManagedCursorImpl cursor = findCursor(topic, subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. + admin.topics().delete(topic); + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. + */ + @Test(dataProvider = "subscriptionTypes") + public void testDeletePartitionedTopicIfCursorPropsEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createPartitionedTopic(topic, 2); + // create a subscription without props. 
+ admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + ManagedCursorImpl cursor = findCursor(topic + "-partition-0", subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. + admin.topics().deletePartitionedTopic(topic); + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. + */ + @Test(dataProvider = "subscriptionTypes") + public void testDeleteTopicIfCursorPropsNotEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createNonPartitionedTopic(topic); + // create a subscription without props. + admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + ManagedCursorImpl cursor = findCursor(topic, subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Put a subscription prop. + Map properties = new HashMap<>(); + properties.put("ignore", "ignore"); + admin.topics().updateSubscriptionProperties(topic, subscriptionName, properties); + assertTrue(cursor.getCursorProperties() != null && !cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. + admin.topics().delete(topic); + } + + /** + * see: https://github.com/apache/pulsar/pull/21595. 
+ */ + @Test(dataProvider = "subscriptionTypes") + public void testDeletePartitionedTopicIfCursorPropsNotEmpty(SubscriptionType subscriptionType) throws Exception { + final String topic = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String subscriptionName = "s1"; + // create a topic. + admin.topics().createPartitionedTopic(topic, 2); + pulsarClient.newProducer().topic(topic).create().close(); + // create a subscription without props. + admin.topics().createSubscription(topic, subscriptionName, MessageId.earliest); + pulsarClient.newConsumer().topic(topic).subscriptionName(subscriptionName) + .subscriptionType(subscriptionType).subscribe().close(); + + ManagedCursorImpl cursor = findCursor(topic + "-partition-0", subscriptionName); + assertNotNull(cursor); + assertTrue(cursor.getCursorProperties() == null || cursor.getCursorProperties().isEmpty()); + // Put a subscription prop. + Map properties = new HashMap<>(); + properties.put("ignore", "ignore"); + admin.topics().updateSubscriptionProperties(topic, subscriptionName, properties); + assertTrue(cursor.getCursorProperties() != null && !cursor.getCursorProperties().isEmpty()); + // Test topic deletion is successful. 
+ admin.topics().deletePartitionedTopic(topic); + } + + + private ManagedCursorImpl findCursor(String topic, String subscriptionName) { + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topic, false).join().get(); + Iterator cursorIterator = persistentTopic.getManagedLedger().getCursors().iterator(); + while (cursorIterator.hasNext()) { + ManagedCursor managedCursor = cursorIterator.next(); + if (managedCursor == null || !managedCursor.getName().equals(subscriptionName)) { + continue; + } + return (ManagedCursorImpl) managedCursor; + } + return null; + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java index a665681528114..1dcd477ca53f3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java @@ -32,6 +32,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; import io.netty.buffer.ByteBuf; import io.netty.channel.EventLoopGroup; import java.lang.reflect.Field; @@ -47,16 +48,23 @@ import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.broker.service.BacklogQuotaManager; import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import 
org.apache.pulsar.compaction.CompactionServiceFactory; +import org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @Slf4j @Test(groups = "broker") -public class MessageDuplicationTest { +public class MessageDuplicationTest extends BrokerTestBase { private static final int BROKER_DEDUPLICATION_ENTRIES_INTERVAL = 10; private static final int BROKER_DEDUPLICATION_MAX_NUMBER_PRODUCERS = 10; @@ -440,4 +448,43 @@ public void completed(Exception e, long ledgerId, long entryId) { } }); } + + @BeforeMethod(alwaysRun = true) + @Override + protected void setup() throws Exception { + this.conf.setBrokerDeduplicationEnabled(true); + super.baseSetup(); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testMessageDeduplication() throws Exception { + String topicName = "persistent://prop/ns-abc/testMessageDeduplication"; + String producerName = "test-producer"; + Producer producer = pulsarClient + .newProducer(Schema.STRING) + .producerName(producerName) + .topic(topicName) + .create(); + final PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopicIfExists(topicName).get().orElse(null); + assertNotNull(persistentTopic); + final MessageDeduplication messageDeduplication = persistentTopic.getMessageDeduplication(); + assertFalse(messageDeduplication.getInactiveProducers().containsKey(producerName)); + producer.close(); + Awaitility.await().untilAsserted(() -> assertTrue(messageDeduplication.getInactiveProducers().containsKey(producerName))); + admin.topicPolicies().setDeduplicationStatus(topicName, false); + Awaitility.await().untilAsserted(() -> { + final Boolean deduplicationStatus = admin.topicPolicies().getDeduplicationStatus(topicName); + Assert.assertNotNull(deduplicationStatus); + 
Assert.assertFalse(deduplicationStatus); + }); + messageDeduplication.purgeInactiveProducers(); + assertTrue(messageDeduplication.getInactiveProducers().isEmpty()); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java index 48a4bfc923608..7e1b5f8c71e6d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java @@ -250,7 +250,7 @@ public void testSkipRedeliverTemporally() { redeliverEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key1"))); final List readEntries = new ArrayList<>(); readEntries.add(EntryImpl.create(1, 2, createMessage("message2", 2, "key1"))); - readEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key2"))); + readEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key22"))); try { Field totalAvailablePermitsField = PersistentDispatcherMultipleConsumers.class.getDeclaredField("totalAvailablePermits"); @@ -346,7 +346,7 @@ public void testMessageRedelivery() throws Exception { // Messages with key1 are routed to consumer1 and messages with key2 are routed to consumer2 final List allEntries = new ArrayList<>(); - allEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key2"))); + allEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key22"))); allEntries.add(EntryImpl.create(1, 2, createMessage("message2", 2, "key1"))); allEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key1"))); allEntries.forEach(entry -> ((EntryImpl) entry).retain()); diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java index bda6d0bdcebcf..717dfc28ac884 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentTopicTest.java @@ -45,6 +45,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -60,6 +61,7 @@ import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.broker.service.TopicPoliciesService; import org.apache.pulsar.broker.stats.PrometheusMetricsTest; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -77,7 +79,9 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicStats; import org.awaitility.Awaitility; import org.testng.Assert; @@ -291,7 +295,8 @@ public void testPersistentPartitionedTopicUnload() throws Exception { assertFalse(pulsar.getBrokerService().getTopics().containsKey(topicName)); pulsar.getBrokerService().getTopicIfExists(topicName).get(); - assertTrue(pulsar.getBrokerService().getTopics().containsKey(topicName)); + // The map topics should only contain partitions, does not contain partitioned topic. 
+ assertFalse(pulsar.getBrokerService().getTopics().containsKey(topicName)); // ref of partitioned-topic name should be empty assertFalse(pulsar.getBrokerService().getTopicReference(topicName).isPresent()); @@ -453,8 +458,7 @@ public void testCreateNonExistentPartitions() throws PulsarAdminException, Pulsa .topic(partition.toString()) .create(); fail("unexpected behaviour"); - } catch (PulsarClientException.TopicDoesNotExistException ignored) { - + } catch (PulsarClientException.NotAllowedException ex) { } Assert.assertEquals(admin.topics().getPartitionedTopicMetadata(topicName).partitions, 4); } @@ -603,4 +607,58 @@ public void testCreateTopicWithZombieReplicatorCursor(boolean topicLevelPolicy) return !topic.getManagedLedger().getCursors().iterator().hasNext(); }); } + + @Test + public void testCheckPersistencePolicies() throws Exception { + final String myNamespace = "prop/ns"; + admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); + final String topic = "persistent://" + myNamespace + "/testConfig" + UUID.randomUUID(); + conf.setForceDeleteNamespaceAllowed(true); + pulsarClient.newProducer().topic(topic).create().close(); + RetentionPolicies retentionPolicies = new RetentionPolicies(1, 1); + PersistentTopic persistentTopic = spy((PersistentTopic) pulsar.getBrokerService().getTopicIfExists(topic).get().get()); + TopicPoliciesService policiesService = spy(pulsar.getTopicPoliciesService()); + doReturn(policiesService).when(pulsar).getTopicPoliciesService(); + TopicPolicies policies = new TopicPolicies(); + policies.setRetentionPolicies(retentionPolicies); + doReturn(CompletableFuture.completedFuture(Optional.of(policies))).when(policiesService).getTopicPoliciesAsync(TopicName.get(topic)); + persistentTopic.onUpdate(policies); + verify(persistentTopic, times(1)).checkPersistencePolicies(); + Awaitility.await().untilAsserted(() -> { + assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionSizeInMB(), 1L); + 
assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionTimeMillis(), TimeUnit.MINUTES.toMillis(1)); + }); + // throw exception + doReturn(CompletableFuture.failedFuture(new RuntimeException())).when(persistentTopic).checkPersistencePolicies(); + policies.setRetentionPolicies(new RetentionPolicies(2, 2)); + persistentTopic.onUpdate(policies); + assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionSizeInMB(), 1L); + assertEquals(persistentTopic.getManagedLedger().getConfig().getRetentionTimeMillis(), TimeUnit.MINUTES.toMillis(1)); + } + + @Test + public void testDynamicConfigurationAutoSkipNonRecoverableData() throws Exception { + pulsar.getConfiguration().setAutoSkipNonRecoverableData(false); + final String topicName = "persistent://prop/ns-abc/testAutoSkipNonRecoverableData"; + final String subName = "test_sub"; + + Consumer subscribe = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName).subscribe(); + PersistentTopic persistentTopic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).join().get(); + PersistentSubscription subscription = persistentTopic.getSubscription(subName); + + assertFalse(persistentTopic.ledger.getConfig().isAutoSkipNonRecoverableData()); + assertFalse(subscription.getExpiryMonitor().isAutoSkipNonRecoverableData()); + + String key = "autoSkipNonRecoverableData"; + admin.brokers().updateDynamicConfiguration(key, "true"); + Awaitility.await() + .untilAsserted(() -> assertEquals(admin.brokers().getAllDynamicConfigurations().get(key), "true")); + + assertTrue(persistentTopic.ledger.getConfig().isAutoSkipNonRecoverableData()); + assertTrue(subscription.getExpiryMonitor().isAutoSkipNonRecoverableData()); + + subscribe.close(); + admin.topics().delete(topicName); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java index e57092d02dd5d..16721ca1203fd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/TopicDuplicationTest.java @@ -492,6 +492,43 @@ public void testDisableNamespacePolicyTakeSnapshot() throws Exception { } + @Test(timeOut = 30000) + public void testDisableNamespacePolicyTakeSnapshotShouldNotThrowException() throws Exception { + cleanup(); + conf.setBrokerDeduplicationEnabled(true); + conf.setBrokerDeduplicationSnapshotFrequencyInSeconds(1); + conf.setBrokerDeduplicationSnapshotIntervalSeconds(1); + conf.setBrokerDeduplicationEntriesInterval(20000); + setup(); + + final String topicName = testTopic + UUID.randomUUID().toString(); + final String producerName = "my-producer"; + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.STRING).topic(topicName).enableBatching(false).producerName(producerName).create(); + + // disable deduplication + admin.namespaces().setDeduplicationStatus(myNamespace, false); + + int msgNum = 50; + CountDownLatch countDownLatch = new CountDownLatch(msgNum); + for (int i = 0; i < msgNum; i++) { + producer.newMessage().value("msg" + i).sendAsync().whenComplete((res, e) -> countDownLatch.countDown()); + } + countDownLatch.await(); + PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopicIfExists(topicName).get().get(); + ManagedCursor managedCursor = persistentTopic.getMessageDeduplication().getManagedCursor(); + + // when disable topic deduplication the cursor should be deleted. + assertNull(managedCursor); + + // this method will be called at brokerService forEachTopic. + // if topic level disable deduplication. + // this method should be skipped without throw exception. 
+ persistentTopic.checkDeduplicationSnapshot(); + } + private void waitCacheInit(String topicName) throws Exception { pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub").subscribe().close(); TopicName topic = TopicName.get(topicName); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java index b868858646c50..1c4f88bc0273c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/plugin/FilterEntryTest.java @@ -22,6 +22,7 @@ import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgsRecordingInvocations; import static org.apache.pulsar.client.api.SubscriptionInitialPosition.Earliest; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -30,8 +31,9 @@ import static org.testng.Assert.assertTrue; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertNotNull; - +import io.netty.buffer.ByteBuf; import java.lang.reflect.Field; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -39,7 +41,6 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; - import lombok.Cleanup; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; @@ -58,11 +59,15 @@ import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.RawMessage; +import org.apache.pulsar.client.api.RawReader; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; 
import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.stats.AnalyzeSubscriptionBacklogResult; +import org.apache.pulsar.compaction.Compactor; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -150,6 +155,58 @@ public void testOverride() throws Exception { consumer.close(); } + @Test + public void testEntryFilterWithCompactor() throws Exception { + conf.setAllowOverrideEntryFilters(true); + String topic = "persistent://prop/ns-abc/topic" + UUID.randomUUID(); + + List messages = new ArrayList<>(); + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topic).create(); + producer.newMessage().key("K1").value("V1").send(); + producer.newMessage().key("K2").value("V2").send(); + producer.newMessage().key("K3").value("V3").send(); + producer.newMessage().key("K4").value("V4").send(); + messages.add("V2"); + messages.add("V4"); + + PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic).get(); + + // set topic level entry filters + EntryFilter mockFilter = mock(EntryFilter.class); + doAnswer(invocationOnMock -> { + FilterContext filterContext = invocationOnMock.getArgument(1); + String partitionKey = filterContext.getMsgMetadata().getPartitionKey(); + if (partitionKey.equals("K1") || partitionKey.equals("K3")) { + return EntryFilter.FilterResult.REJECT; + } else { + return EntryFilter.FilterResult.ACCEPT; + } + }).when(mockFilter).filterEntry(any(Entry.class), any(FilterContext.class)); + setMockFilterToTopic(topicRef, List.of(mockFilter)); + + List results = new ArrayList<>(); + RawReader rawReader = RawReader.create(pulsarClient, topic, Compactor.COMPACTION_SUBSCRIPTION).get(); + while (true) { + boolean hasMsg = rawReader.hasMessageAvailableAsync().get(); + if (hasMsg) { + try (RawMessage m = 
rawReader.readNextAsync().get()) { + ByteBuf headersAndPayload = m.getHeadersAndPayload(); + Commands.skipMessageMetadata(headersAndPayload); + byte[] bytes = new byte[headersAndPayload.readableBytes()]; + headersAndPayload.readBytes(bytes); + + results.add(new String(bytes)); + } + } else { + break; + } + } + rawReader.closeAsync().get(); + + Assert.assertEquals(messages, results); + } + @SneakyThrows private void setMockFilterToTopic(PersistentTopic topicRef, List mockFilter) { FieldUtils.writeField(topicRef, "entryFilters", Pair.of(null, mockFilter), true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java index c7e30d5c3fc37..a7d33737bdc3c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/SchemaServiceTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service.schema; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.fail; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertFalse; @@ -40,16 +41,23 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.service.schema.SchemaRegistry.SchemaAndMetadata; import org.apache.pulsar.broker.stats.PrometheusMetricsTest; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.impl.schema.KeyValueSchemaInfo; import org.apache.pulsar.common.naming.TopicName; import 
org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; +import org.apache.pulsar.common.protocol.schema.IsCompatibilityResponse; import org.apache.pulsar.common.protocol.schema.SchemaData; import org.apache.pulsar.common.protocol.schema.SchemaVersion; +import org.apache.pulsar.common.schema.KeyValueEncodingType; import org.apache.pulsar.common.schema.LongSchemaVersion; +import org.apache.pulsar.common.schema.SchemaInfo; +import org.apache.pulsar.common.schema.SchemaInfoWithVersion; import org.apache.pulsar.common.schema.SchemaType; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -93,6 +101,7 @@ protected void setup() throws Exception { Map checkMap = new HashMap<>(); checkMap.put(SchemaType.AVRO, new AvroSchemaCompatibilityCheck()); schemaRegistryService = new SchemaRegistryServiceImpl(storage, checkMap, MockClock, null); + setupDefaultTenantAndNamespace(); } @AfterMethod(alwaysRun = true) @@ -385,4 +394,33 @@ private static SchemaData getSchemaData(String schemaJson) { private SchemaVersion version(long version) { return new LongSchemaVersion(version); } + + @Test + public void testKeyValueSchema() throws Exception { + final String topicName = "persistent://public/default/testKeyValueSchema"; + admin.topics().createNonPartitionedTopic(BrokerTestUtil.newUniqueName(topicName)); + + final SchemaInfo schemaInfo = KeyValueSchemaInfo.encodeKeyValueSchemaInfo( + "keyValue", + SchemaInfo.builder().type(SchemaType.STRING).schema(new byte[0]) + .build(), + SchemaInfo.builder().type(SchemaType.BOOLEAN).schema(new byte[0]) + .build(), KeyValueEncodingType.SEPARATED); + assertThrows(PulsarAdminException.ServerSideErrorException.class, () -> admin.schemas().testCompatibility(topicName, schemaInfo)); + admin.schemas().createSchema(topicName, schemaInfo); + + final IsCompatibilityResponse isCompatibilityResponse = admin.schemas().testCompatibility(topicName, schemaInfo); + Assert.assertTrue(isCompatibilityResponse.isCompatibility()); + + 
admin.schemas().createSchema(topicName, schemaInfo); + + final SchemaInfoWithVersion schemaInfoWithVersion = admin.schemas().getSchemaInfoWithVersion(topicName); + Assert.assertEquals(schemaInfoWithVersion.getVersion(), 0); + + final Long version1 = admin.schemas().getVersionBySchema(topicName, schemaInfo); + Assert.assertEquals(version1, 0); + + final Long version2 = admin.schemas().getVersionBySchema(topicName, schemaInfoWithVersion.getSchemaInfo()); + Assert.assertEquals(version2, 0); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/TopicSchemaTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/TopicSchemaTest.java new file mode 100644 index 0000000000000..66bfd1c3ec2b0 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/TopicSchemaTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service.schema; + +import static org.apache.pulsar.broker.service.schema.SchemaRegistry.SchemaAndMetadata; +import static org.testng.Assert.assertTrue; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.naming.TopicDomain; +import org.apache.pulsar.common.naming.TopicName; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class TopicSchemaTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @DataProvider(name = "topicDomains") + public Object[][] topicDomains() { + return new Object[][]{ + {TopicDomain.non_persistent}, + {TopicDomain.persistent} + }; + } + + @Test(dataProvider = "topicDomains") + public void testDeleteNonPartitionedTopicWithSchema(TopicDomain topicDomain) throws Exception { + final String topic = BrokerTestUtil.newUniqueName(topicDomain.value() + "://public/default/tp"); + final String schemaId = TopicName.get(TopicName.get(topic).getPartitionedTopicName()).getSchemaName(); + admin.topics().createNonPartitionedTopic(topic); + + // Add schema. 
+ Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic) + .enableBatching(false).create(); + producer.close(); + List schemaList1 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList1 != null && schemaList1.size() > 0); + + // Verify the schema has been deleted with topic. + admin.topics().delete(topic, false); + List schemaList2 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList2 == null || schemaList2.isEmpty()); + } + + @Test + public void testDeletePartitionedTopicWithoutSchema() throws Exception { + // Non-persistent topic does not support partitioned topic now, so only write a test case for persistent topic. + TopicDomain topicDomain = TopicDomain.persistent; + final String topic = BrokerTestUtil.newUniqueName(topicDomain.value() + "://public/default/tp"); + final String partition0 = topic + "-partition-0"; + final String partition1 = topic + "-partition-1"; + final String schemaId = TopicName.get(TopicName.get(topic).getPartitionedTopicName()).getSchemaName(); + admin.topics().createPartitionedTopic(topic, 2); + + // Add schema. + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic) + .enableBatching(false).create(); + producer.close(); + List schemaList1 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList1 != null && schemaList1.size() > 0); + + // Verify the schema will not been deleted with partition-0. 
+ admin.topics().delete(partition0, false); + List schemaList2 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList2 != null && schemaList2.size() > 0); + + // Verify the schema will not been deleted with partition-0 & partition-1. + admin.topics().delete(partition1, false); + List schemaList3 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList3 != null && schemaList3.size() > 0); + + // Verify the schema will be deleted with partitioned metadata. + admin.topics().deletePartitionedTopic(topic, false); + List schemaList4 = pulsar.getSchemaRegistryService().getAllSchemas(schemaId).join() + .stream().map(s -> s.join()).filter(Objects::nonNull).collect(Collectors.toList()); + assertTrue(schemaList4 == null || schemaList4.isEmpty()); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java index bf0dd3aa9c1c5..c8fce32efc5f0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/utils/ClientChannelHelper.java @@ -27,6 +27,8 @@ import org.apache.pulsar.common.api.proto.CommandEndTxnResponse; import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespaceResponse; import org.apache.pulsar.common.api.proto.CommandPartitionedTopicMetadataResponse; +import org.apache.pulsar.common.api.proto.CommandPing; +import org.apache.pulsar.common.api.proto.CommandPong; import org.apache.pulsar.common.api.proto.CommandWatchTopicListSuccess; import org.apache.pulsar.common.protocol.PulsarDecoder; import org.apache.pulsar.common.api.proto.CommandAck; @@ -207,6 +209,16 @@ 
protected void handleEndTxnOnSubscriptionResponse( CommandEndTxnOnSubscriptionResponse commandEndTxnOnSubscriptionResponse) { queue.offer(new CommandEndTxnOnSubscriptionResponse().copyFrom(commandEndTxnOnSubscriptionResponse)); } + + @Override + protected void handlePing(CommandPing ping) { + queue.offer(new CommandPing().copyFrom(ping)); + } + + @Override + protected void handlePong(CommandPong pong) { + return; + } }; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java index bbeee9f5a497a..f29c643a8f50b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java @@ -446,4 +446,37 @@ public void testAvgMessagesPerEntry() throws Exception { int avgMessagesPerEntry = consumerStats.getAvgMessagesPerEntry(); assertEquals(3, avgMessagesPerEntry); } + + @Test() + public void testNonPersistentTopicSharedSubscriptionUnackedMessages() throws Exception { + final String topicName = "non-persistent://my-property/my-ns/my-topic" + UUID.randomUUID(); + final String subName = "my-sub"; + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topicName) + .create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + for (int i = 0; i < 5; i++) { + producer.send(("message-" + i).getBytes()); + } + for (int i = 0; i < 5; i++) { + Message msg = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledge(msg); + } + TimeUnit.SECONDS.sleep(1); + + TopicStats topicStats = admin.topics().getStats(topicName); + assertEquals(1, topicStats.getSubscriptions().size()); + List consumers = topicStats.getSubscriptions().get(subName).getConsumers(); + assertEquals(1, consumers.size()); + assertEquals(0, 
consumers.get(0).getUnackedMessages()); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java index b5933f9ecf529..0e12d75f74fa0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStatsTest.java @@ -20,10 +20,12 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; - +import java.util.HashMap; +import org.apache.bookkeeper.mledger.util.StatsBuckets; +import org.apache.pulsar.common.policies.data.stats.TopicMetricBean; import org.testng.annotations.Test; -@Test(groups = "broker") +@Test(groups = {"broker"}) public class AggregatedNamespaceStatsTest { @Test @@ -157,4 +159,101 @@ public void testSimpleAggregation() { assertEquals(nsSubStats.unackedMessages, 2); } + + @Test + public void testReset() { + AggregatedNamespaceStats stats = new AggregatedNamespaceStats(); + stats.topicsCount = 8; + stats.subscriptionsCount = 3; + stats.producersCount = 1; + stats.consumersCount = 8; + stats.rateIn = 1.3; + stats.rateOut = 3.5; + stats.throughputIn = 3.2; + stats.throughputOut = 5.8; + stats.messageAckRate = 12; + stats.bytesInCounter = 1234; + stats.msgInCounter = 3889; + stats.bytesOutCounter = 89775; + stats.msgOutCounter = 28983; + stats.msgBacklog = 39; + stats.msgDelayed = 31; + + stats.ongoingTxnCount = 87; + stats.abortedTxnCount = 74; + stats.committedTxnCount = 34; + + stats.backlogQuotaLimit = 387; + stats.backlogQuotaLimitTime = 8771; + + stats.replicationStats = new HashMap<>(); + stats.replicationStats.put("r", new AggregatedReplicationStats()); + + stats.subscriptionStats = new HashMap<>(); + stats.subscriptionStats.put("r", new AggregatedSubscriptionStats()); + + 
stats.compactionRemovedEventCount = 124; + stats.compactionSucceedCount = 487; + stats.compactionFailedCount = 84857; + stats.compactionDurationTimeInMills = 2384; + stats.compactionReadThroughput = 355423; + stats.compactionWriteThroughput = 23299; + stats.compactionCompactedEntriesCount = 37522; + stats.compactionCompactedEntriesSize = 8475; + + stats.compactionLatencyBuckets = new StatsBuckets(5); + stats.compactionLatencyBuckets.addValue(3); + + stats.delayedMessageIndexSizeInBytes = 45223; + + stats.bucketDelayedIndexStats = new HashMap<>(); + stats.bucketDelayedIndexStats.put("t", new TopicMetricBean()); + + stats.reset(); + + assertEquals(stats.bytesOutCounter, 0); + assertEquals(stats.topicsCount, 0); + assertEquals(stats.subscriptionsCount, 0); + assertEquals(stats.producersCount, 0); + assertEquals(stats.consumersCount, 0); + assertEquals(stats.rateIn, 0); + assertEquals(stats.rateOut, 0); + assertEquals(stats.throughputIn, 0); + assertEquals(stats.throughputOut, 0); + assertEquals(stats.messageAckRate, 0); + assertEquals(stats.bytesInCounter, 0); + assertEquals(stats.msgInCounter, 0); + assertEquals(stats.bytesOutCounter, 0); + assertEquals(stats.msgOutCounter, 0); + + assertEquals(stats.managedLedgerStats.storageSize, 0); + + assertEquals(stats.msgBacklog, 0); + assertEquals(stats.msgDelayed, 0); + + assertEquals(stats.ongoingTxnCount, 0); + assertEquals(stats.abortedTxnCount, 0); + assertEquals(stats.committedTxnCount, 0); + + assertEquals(stats.backlogQuotaLimit, 0); + assertEquals(stats.backlogQuotaLimitTime, -1); + + assertEquals(stats.replicationStats.size(), 0); + assertEquals(stats.subscriptionStats.size(), 0); + + assertEquals(stats.compactionRemovedEventCount, 0); + assertEquals(stats.compactionSucceedCount, 0); + assertEquals(stats.compactionFailedCount, 0); + assertEquals(stats.compactionDurationTimeInMills, 0); + assertEquals(stats.compactionReadThroughput, 0); + assertEquals(stats.compactionWriteThroughput, 0); + 
assertEquals(stats.compactionCompactedEntriesCount, 0); + assertEquals(stats.compactionCompactedEntriesSize, 0); + + assertEquals(stats.compactionLatencyBuckets.getSum(), 0); + + assertEquals(stats.delayedMessageIndexSizeInBytes, 0); + assertEquals(stats.bucketDelayedIndexStats.size(), 0); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java index 008c2143a3566..a2401ebe19a06 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.broker.systopic; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; import com.google.common.collect.Sets; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -37,6 +39,7 @@ import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.service.schema.SchemaRegistry; import org.apache.pulsar.client.admin.ListTopicsOptions; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; @@ -55,6 +58,7 @@ import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicPolicies; import org.apache.pulsar.common.policies.data.TopicType; import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; @@ -77,6 +81,7 @@ protected void setup() throws Exception { conf.setDefaultNumPartitions(PARTITIONS); conf.setManagedLedgerMaxEntriesPerLedger(1); 
conf.setBrokerDeleteInactiveTopicsEnabled(false); + conf.setTransactionCoordinatorEnabled(true); super.baseSetup(); } @@ -160,7 +165,7 @@ public void testProduceAndConsumeUnderSystemNamespace() throws Exception { @Test public void testHealthCheckTopicNotOffload() throws Exception { - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); TopicName topicName = TopicName.get("persistent", namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() @@ -180,18 +185,25 @@ public void testHealthCheckTopicNotOffload() throws Exception { @Test public void testSystemNamespaceNotCreateChangeEventsTopic() throws Exception { admin.brokers().healthcheck(TopicVersion.V2); - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); TopicName topicName = TopicName.get("persistent", namespaceName, SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME); Optional optionalTopic = pulsar.getBrokerService() .getTopic(topicName.getPartition(1).toString(), false).join(); Assert.assertTrue(optionalTopic.isEmpty()); + + TopicName heartbeatTopicName = TopicName.get("persistent", + namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); + admin.topics().getRetention(heartbeatTopicName.toString()); + optionalTopic = pulsar.getBrokerService() + .getTopic(topicName.getPartition(1).toString(), false).join(); + Assert.assertTrue(optionalTopic.isEmpty()); } @Test public void testHeartbeatTopicNotAllowedToSendEvent() throws Exception { admin.brokers().healthcheck(TopicVersion.V2); - NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), + NamespaceName namespaceName = 
NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfig()); TopicName topicName = TopicName.get("persistent", namespaceName, SystemTopicNames.NAMESPACE_EVENTS_LOCAL_NAME); for (int partition = 0; partition < PARTITIONS; partition ++) { @@ -203,6 +215,40 @@ public void testHeartbeatTopicNotAllowedToSendEvent() throws Exception { }); } + @Test + public void testHeartbeatTopicBeDeleted() throws Exception { + admin.brokers().healthcheck(TopicVersion.V2); + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), + pulsar.getConfig()); + TopicName heartbeatTopicName = TopicName.get("persistent", namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); + + List topics = getPulsar().getNamespaceService().getListOfPersistentTopics(namespaceName).join(); + Assert.assertEquals(topics.size(), 1); + Assert.assertEquals(topics.get(0), heartbeatTopicName.toString()); + + admin.topics().delete(heartbeatTopicName.toString(), true); + topics = getPulsar().getNamespaceService().getListOfPersistentTopics(namespaceName).join(); + Assert.assertEquals(topics.size(), 0); + } + + @Test + public void testHeartbeatNamespaceNotCreateTransactionInternalTopic() throws Exception { + admin.brokers().healthcheck(TopicVersion.V2); + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), + pulsar.getConfig()); + TopicName topicName = TopicName.get("persistent", + namespaceName, SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT); + Optional optionalTopic = pulsar.getBrokerService() + .getTopic(topicName.getPartition(1).toString(), false).join(); + Assert.assertTrue(optionalTopic.isEmpty()); + + List topics = getPulsar().getNamespaceService().getListOfPersistentTopics(namespaceName).join(); + Assert.assertEquals(topics.size(), 1); + TopicName heartbeatTopicName = TopicName.get("persistent", + namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); + Assert.assertEquals(topics.get(0), 
heartbeatTopicName.toString()); + } + @Test public void testSetBacklogCausedCreatingProducerFailure() throws Exception { final String ns = "prop/ns-test"; @@ -299,4 +345,25 @@ public void testSystemTopicNotCheckExceed() throws Exception { writer1.get().close(); writer2.get().close(); } + + @Test + public void testDeleteTopicSchemaAndPolicyWhenTopicIsNotLoaded() throws Exception { + final String ns = "prop/ns-test"; + admin.namespaces().createNamespace(ns, 2); + final String topicName = "persistent://prop/ns-test/testDeleteTopicSchemaAndPolicyWhenTopicIsNotLoaded"; + admin.topics().createNonPartitionedTopic(topicName); + pulsarClient.newProducer(Schema.STRING).topic(topicName).create().close(); + admin.topicPolicies().setMaxConsumers(topicName, 2); + Awaitility.await().untilAsserted(() -> assertEquals(admin.topicPolicies().getMaxConsumers(topicName), 2)); + CompletableFuture> topic = pulsar.getBrokerService().getTopic(topicName, false); + PersistentTopic persistentTopic = (PersistentTopic) topic.join().get(); + persistentTopic.close(); + admin.topics().delete(topicName); + TopicPolicies topicPolicies = pulsar.getTopicPoliciesService().getTopicPoliciesIfExists(TopicName.get(topicName)); + assertNull(topicPolicies); + String base = TopicName.get(topicName).getPartitionedTopicName(); + String id = TopicName.get(base).getSchemaName(); + CompletableFuture schema = pulsar.getSchemaRegistryService().getSchema(id); + assertNull(schema.join()); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java index 2896f338e4a1f..2027bb33bf18e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/NonStartableTestPulsarService.java @@ -20,6 +20,7 @@ import static 
org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import io.netty.channel.EventLoopGroup; import java.io.IOException; import java.util.Collections; @@ -42,6 +43,7 @@ import org.apache.pulsar.broker.transaction.buffer.TransactionBufferProvider; import org.apache.pulsar.broker.transaction.pendingack.TransactionPendingAckStoreProvider; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.naming.TopicName; @@ -77,7 +79,10 @@ public NonStartableTestPulsarService(SpyConfig spyConfig, ServiceConfiguration c throw new RuntimeException(e); } setSchemaRegistryService(spyWithClassAndConstructorArgs(DefaultSchemaRegistryService.class)); - setClient(mock(PulsarClientImpl.class)); + PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); + setClient(mockClient); this.namespaceService = mock(NamespaceService.class); try { startNamespaceService(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java index 40a42286fda74..449f7fc7d57e1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/testcontext/PulsarTestContext.java @@ -54,6 +54,7 @@ import org.apache.pulsar.broker.service.ServerCnx; import org.apache.pulsar.broker.storage.ManagedLedgerStorage; import org.apache.pulsar.common.util.GracefulExecutorServicesShutdown; +import org.apache.pulsar.common.util.PortManager; import 
org.apache.pulsar.compaction.CompactionServiceFactory; import org.apache.pulsar.compaction.Compactor; import org.apache.pulsar.compaction.PulsarCompactionServiceFactory; @@ -157,6 +158,8 @@ public class PulsarTestContext implements AutoCloseable { private final boolean startable; + private final boolean preallocatePorts; + public ManagedLedgerFactory getManagedLedgerFactory() { return managedLedgerClientFactory.getManagedLedgerFactory(); @@ -224,7 +227,9 @@ public static class Builder { protected SpyConfig.Builder spyConfigBuilder = SpyConfig.builder(SpyConfig.SpyType.NONE); protected Consumer pulsarServiceCustomizer; protected ServiceConfiguration svcConfig = initializeConfig(); - protected Consumer configOverrideCustomizer = this::defaultOverrideServiceConfiguration; + protected Consumer configOverrideCustomizer; + + protected boolean configOverrideCalled = false; protected Function brokerServiceCustomizer = Function.identity(); /** @@ -349,6 +354,7 @@ public Builder configCustomizer(Consumer configCustomerize */ public Builder configOverride(Consumer configOverrideCustomizer) { this.configOverrideCustomizer = configOverrideCustomizer; + this.configOverrideCalled = true; return this; } @@ -525,6 +531,12 @@ public final PulsarTestContext build() { if (super.config == null) { config(svcConfig); } + handlePreallocatePorts(super.config); + if (configOverrideCustomizer != null || !configOverrideCalled) { + // call defaultOverrideServiceConfiguration if configOverrideCustomizer + // isn't explicitly set to null with `.configOverride(null)` call + defaultOverrideServiceConfiguration(super.config); + } if (configOverrideCustomizer != null) { configOverrideCustomizer.accept(super.config); } @@ -547,6 +559,37 @@ public final PulsarTestContext build() { return super.build(); } + protected void handlePreallocatePorts(ServiceConfiguration config) { + if (super.preallocatePorts) { + config.getBrokerServicePort().ifPresent(portNumber -> { + if (portNumber == 0) { + 
config.setBrokerServicePort(Optional.of(PortManager.nextLockedFreePort())); + } + }); + config.getBrokerServicePortTls().ifPresent(portNumber -> { + if (portNumber == 0) { + config.setBrokerServicePortTls(Optional.of(PortManager.nextLockedFreePort())); + } + }); + config.getWebServicePort().ifPresent(portNumber -> { + if (portNumber == 0) { + config.setWebServicePort(Optional.of(PortManager.nextLockedFreePort())); + } + }); + config.getWebServicePortTls().ifPresent(portNumber -> { + if (portNumber == 0) { + config.setWebServicePortTls(Optional.of(PortManager.nextLockedFreePort())); + } + }); + registerCloseable(() -> { + config.getBrokerServicePort().ifPresent(PortManager::releaseLockedPort); + config.getBrokerServicePortTls().ifPresent(PortManager::releaseLockedPort); + config.getWebServicePort().ifPresent(PortManager::releaseLockedPort); + config.getWebServicePortTls().ifPresent(PortManager::releaseLockedPort); + }); + } + } + private void initializeCommonPulsarServices(SpyConfig spyConfig) { if (super.bookKeeperClient == null && super.managedLedgerClientFactory == null) { if (super.executor == null) { @@ -715,7 +758,8 @@ protected void initializePulsarServices(SpyConfig spyConfig, Builder builder) { if (metadataStore == null) { metadataStore = builder.configurationMetadataStore; } - NamespaceResources nsr = spyConfigPulsarResources.spy(NamespaceResources.class, metadataStore, 30); + NamespaceResources nsr = spyConfigPulsarResources.spy(NamespaceResources.class, + builder.localMetadataStore, metadataStore, 30); TopicResources tsr = spyConfigPulsarResources.spy(TopicResources.class, metadataStore); pulsarResources( spyConfigPulsarResources.spy( diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java index 0e17719aca7e7..9e262d1cb5617 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.broker.transaction; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertTrue; import com.google.common.collect.Sets; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; @@ -39,12 +41,15 @@ import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.ChunkMessageIdImpl; import org.apache.pulsar.common.api.proto.MessageIdData; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.api.proto.TxnAction; @@ -368,4 +373,45 @@ public void completed(Exception e, long ledgerId, long entryId) { return positionList; } + @Test + public void testAckChunkMessage() throws Exception { + String producerName = "test-producer"; + String subName = "testAckChunkMessage"; + @Cleanup + PulsarClient pulsarClient1 = PulsarClient.builder().serviceUrl(pulsarServiceList.get(0).getBrokerServiceUrl()) + .enableTransaction(true).build(); + @Cleanup + Producer producer = pulsarClient1 + .newProducer(Schema.STRING) + .producerName(producerName) + .topic(CONSUME_TOPIC) + .enableChunking(true) + .enableBatching(false) + .create(); + Consumer consumer = pulsarClient1 + 
.newConsumer(Schema.STRING) + .subscriptionType(SubscriptionType.Shared) + .topic(CONSUME_TOPIC) + .subscriptionName(subName) + .subscribe(); + + int messageSize = 6000; // payload size in KB + String message = "a".repeat(messageSize * 1000); + MessageId messageId = producer.newMessage().value(message).send(); + assertTrue(messageId instanceof ChunkMessageIdImpl); + assertNotEquals(((ChunkMessageIdImpl) messageId).getLastChunkMessageId(), + ((ChunkMessageIdImpl) messageId).getFirstChunkMessageId()); + + Transaction transaction = pulsarClient1.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + + Message msg = consumer.receive(); + consumer.acknowledgeAsync(msg.getMessageId(), transaction); + transaction.commit().get(); + + Assert.assertEquals(admin.topics().getStats(CONSUME_TOPIC).getSubscriptions().get(subName) + .getUnackedMessages(), 0); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java index cf389824794e5..c7e545618dbae 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java @@ -106,6 +106,7 @@ import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore; import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStoreProvider; import org.apache.pulsar.broker.transaction.pendingack.impl.PendingAckHandleImpl; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; @@ -1783,4 +1784,118 @@ private void getTopic(String topicName) { }); } + @Test + public void testReadCommittedWithReadCompacted() throws Exception{ + final String namespace = "tnx/ns-prechecks"; + final String 
topic = "persistent://" + namespace + "/test_transaction_topic"; + admin.namespaces().createNamespace(namespace); + admin.topics().createNonPartitionedTopic(topic); + + admin.topicPolicies().setCompactionThreshold(topic, 100 * 1024 * 1024); + + @Cleanup + Consumer consumer = this.pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Exclusive) + .readCompacted(true) + .subscribe(); + + @Cleanup + Producer producer = this.pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + producer.newMessage().key("K1").value("V1").send(); + + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(1, TimeUnit.MINUTES).build().get(); + producer.newMessage(txn).key("K2").value("V2").send(); + producer.newMessage(txn).key("K3").value("V3").send(); + + List messages = new ArrayList<>(); + while (true) { + Message message = consumer.receive(5, TimeUnit.SECONDS); + if (message == null) { + break; + } + messages.add(message.getValue()); + } + + Assert.assertEquals(messages, List.of("V1")); + + txn.commit(); + + messages.clear(); + + while (true) { + Message message = consumer.receive(5, TimeUnit.SECONDS); + if (message == null) { + break; + } + messages.add(message.getValue()); + } + + Assert.assertEquals(messages, List.of("V2", "V3")); + } + + + @Test + public void testReadCommittedWithCompaction() throws Exception{ + final String namespace = "tnx/ns-prechecks"; + final String topic = "persistent://" + namespace + "/test_transaction_topic" + UUID.randomUUID(); + admin.namespaces().createNamespace(namespace); + admin.topics().createNonPartitionedTopic(topic); + + admin.topicPolicies().setCompactionThreshold(topic, 100 * 1024 * 1024); + + @Cleanup + Producer producer = this.pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + producer.newMessage().key("K1").value("V1").send(); + + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(1, 
TimeUnit.MINUTES).build().get(); + producer.newMessage(txn).key("K2").value("V2").send(); + producer.newMessage(txn).key("K3").value("V3").send(); + txn.commit().get(); + + producer.newMessage().key("K1").value("V4").send(); + + Transaction txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(1, TimeUnit.MINUTES).build().get(); + producer.newMessage(txn2).key("K2").value("V5").send(); + producer.newMessage(txn2).key("K3").value("V6").send(); + txn2.commit().get(); + + admin.topics().triggerCompaction(topic); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topic).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Consumer consumer = this.pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Exclusive) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .readCompacted(true) + .subscribe(); + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + + result.add(receive.getValue()); + } + + Assert.assertEquals(result, List.of("V4", "V5", "V6")); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java index c0300c63b3587..1ff835732aab5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java @@ -157,10 +157,8 @@ protected void startBroker() throws Exception { conf.setBrokerShutdownTimeoutMs(0L); conf.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d)); conf.setBrokerServicePort(Optional.of(0)); - conf.setBrokerServicePortTls(Optional.of(0)); conf.setAdvertisedAddress("localhost"); conf.setWebServicePort(Optional.of(0)); - 
conf.setWebServicePortTls(Optional.of(0)); conf.setTransactionCoordinatorEnabled(true); conf.setBrokerDeduplicationEnabled(true); conf.setTransactionBufferSnapshotMaxTransactionCount(2); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java index aa98fc7d70106..ecba4a9f1d54a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java @@ -18,9 +18,18 @@ */ package org.apache.pulsar.broker.transaction.buffer; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.when; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import java.util.List; +import lombok.Cleanup; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.BrokerService; @@ -30,8 +39,15 @@ import org.apache.pulsar.broker.transaction.TransactionTestBase; import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBuffer; import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferState; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Reader; +import 
org.apache.pulsar.client.api.TopicMessageId; import org.apache.pulsar.client.api.transaction.Transaction; +import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.client.impl.TopicMessageIdImpl; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; @@ -179,4 +195,177 @@ public void testCloseTransactionBufferWhenTimeout() throws Exception { Assert.assertTrue(f.isCompletedExceptionally()); } + /** + * This test mainly test the following two point: + * 1. `getLastMessageIds` will get max read position. + * Send two message |1:0|1:1|; mock max read position as |1:0|; `getLastMessageIds` will get |1:0|. + * 2. `getLastMessageIds` will wait Transaction buffer recover completely. + * Mock `checkIfTBRecoverCompletely` return an exception, `getLastMessageIds` will fail too. + * Mock `checkIfTBRecoverCompletely` return null, `getLastMessageIds` will get correct result. + */ + @Test + public void testGetMaxPositionAfterTBReady() throws Exception { + // 1. Prepare test environment. + String topic = "persistent://" + NAMESPACE1 + "/testGetMaxReadyPositionAfterTBReady"; + // 1.1 Mock component. + TransactionBuffer transactionBuffer = Mockito.spy(TransactionBuffer.class); + when(transactionBuffer.checkIfTBRecoverCompletely(anyBoolean())) + // Handle producer will check transaction buffer recover completely. + .thenReturn(CompletableFuture.completedFuture(null)) + // If the Transaction buffer failed to recover, we can not get the correct last max read id. + .thenReturn(CompletableFuture.failedFuture(new Throwable("Mock fail"))) + // If the transaction buffer recover successfully, the max read position can be acquired successfully. 
+ .thenReturn(CompletableFuture.completedFuture(null)); + TransactionBufferProvider transactionBufferProvider = Mockito.spy(TransactionBufferProvider.class); + Mockito.doReturn(transactionBuffer).when(transactionBufferProvider).newTransactionBuffer(any()); + TransactionBufferProvider originalTBProvider = getPulsarServiceList().get(0).getTransactionBufferProvider(); + Mockito.doReturn(transactionBufferProvider).when(getPulsarServiceList().get(0)).getTransactionBufferProvider(); + // 2. Building producer and consumer. + admin.topics().createNonPartitionedTopic(topic); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName("sub") + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + // 3. Send message and test the exception can be handled as expected. + MessageIdImpl messageId = (MessageIdImpl) producer.newMessage().send(); + producer.newMessage().send(); + Mockito.doReturn(new PositionImpl(messageId.getLedgerId(), messageId.getEntryId())) + .when(transactionBuffer).getMaxReadPosition(); + try { + consumer.getLastMessageIds(); + fail(); + } catch (PulsarClientException exception) { + assertTrue(exception.getMessage().contains("Failed to recover Transaction Buffer.")); + } + List messageIdList = consumer.getLastMessageIds(); + assertEquals(messageIdList.size(), 1); + TopicMessageIdImpl actualMessageID = (TopicMessageIdImpl) messageIdList.get(0); + assertEquals(messageId.getLedgerId(), actualMessageID.getLedgerId()); + assertEquals(messageId.getEntryId(), actualMessageID.getEntryId()); + // 4. Clean resource + Mockito.doReturn(originalTBProvider).when(getPulsarServiceList().get(0)).getTransactionBufferProvider(); + } + + /** + * Add a E2E test for the get last message ID. It tests 4 cases. + *

+ * 1. Only normal messages in the topic. + * 2. There are ongoing transactions, last message ID will not be updated until transaction end. + * 3. Aborted transaction will make the last message ID be updated as expected. + * 4. Committed transaction will make the last message ID be updated as expected. + *

+ */ + @Test + public void testGetLastMessageIdsWithOngoingTransactions() throws Exception { + // 1. Prepare environment + String topic = "persistent://" + NAMESPACE1 + "/testGetLastMessageIdsWithOngoingTransactions"; + String subName = "my-subscription"; + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName(subName) + .subscribe(); + + // 2. Test last max read position can be required correctly. + // 2.1 Case1: send 3 original messages. |1:0|1:1|1:2| + MessageIdImpl expectedLastMessageID = null; + for (int i = 0; i < 3; i++) { + expectedLastMessageID = (MessageIdImpl) producer.newMessage().send(); + } + assertMessageId(consumer, expectedLastMessageID, 0); + // 2.2 Case2: send 2 ongoing transactional messages and 2 original messages. + // |1:0|1:1|1:2|txn1->1:3|1:4|txn2->1:5|1:6|. + Transaction txn1 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + Transaction txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + producer.newMessage(txn1).send(); + MessageIdImpl expectedLastMessageID1 = (MessageIdImpl) producer.newMessage().send(); + producer.newMessage(txn2).send(); + MessageIdImpl expectedLastMessageID2 = (MessageIdImpl) producer.newMessage().send(); + // 2.2.1 Last message ID will not change when txn1 and txn2 do not end. + assertMessageId(consumer, expectedLastMessageID, 0); + // 2.2.2 Last message ID will update to 1:4 when txn1 committed. + txn1.commit().get(5, TimeUnit.SECONDS); + assertMessageId(consumer, expectedLastMessageID1, 0); + // 2.2.3 Last message ID will update to 1:6 when txn2 aborted. + txn2.abort().get(5, TimeUnit.SECONDS); + // Todo: We can not ignore the marker's position in this fix. 
+ assertMessageId(consumer, expectedLastMessageID2, 2); + } + + /** + * produce 3 messages and then trigger a ledger switch, + * then create a transaction and send a transactional message. + * As there are messages in the new ledger, the reader should be able to read the messages. + * But reader.hasMessageAvailable() returns false if the entry id of max read position is -1. + * @throws Exception + */ + @Test + public void testGetLastMessageIdsWithOpenTransactionAtLedgerHead() throws Exception { + String topic = "persistent://" + NAMESPACE1 + "/testGetLastMessageIdsWithOpenTransactionAtLedgerHead"; + String subName = "my-subscription"; + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName(subName) + .subscribe(); + MessageId expectedLastMessageID = null; + for (int i = 0; i < 3; i++) { + expectedLastMessageID = producer.newMessage().value(String.valueOf(i).getBytes()).send(); + System.out.println("expectedLastMessageID: " + expectedLastMessageID); + } + triggerLedgerSwitch(topic); + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.HOURS) + .build() + .get(); + producer.newMessage(txn).send(); + + Reader reader = pulsarClient.newReader() + .topic(topic) + .startMessageId(MessageId.earliest) + .create(); + assertTrue(reader.hasMessageAvailable()); + } + + private void triggerLedgerSwitch(String topicName) throws Exception{ + admin.topics().unload(topicName); + Awaitility.await().until(() -> { + CompletableFuture> topicFuture = + getPulsarServiceList().get(0).getBrokerService().getTopic(topicName, false); + if (!topicFuture.isDone() || topicFuture.isCompletedExceptionally()){ + return false; + } + Optional topicOptional = topicFuture.join(); + if (!topicOptional.isPresent()){ + return false; + } + PersistentTopic persistentTopic = (PersistentTopic) topicOptional.get(); + ManagedLedgerImpl managedLedger = 
(ManagedLedgerImpl) persistentTopic.getManagedLedger(); + return managedLedger.getState() == ManagedLedgerImpl.State.LedgerOpened; + }); + } + + private void assertMessageId(Consumer consumer, MessageIdImpl expected, int entryOffset) throws Exception { + TopicMessageIdImpl actual = (TopicMessageIdImpl) consumer.getLastMessageIds().get(0); + assertEquals(expected.getEntryId(), actual.getEntryId() - entryOffset); + assertEquals(expected.getLedgerId(), actual.getLedgerId()); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java index 2cfc9f46f0ebe..1dc086dbe3470 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.broker.transaction.buffer; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; @@ -51,6 +53,7 @@ import org.apache.pulsar.client.api.transaction.TransactionBufferClientException; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.api.proto.TxnAction; import org.apache.pulsar.common.naming.NamespaceName; @@ -207,6 +210,8 @@ public void testAbortOnSubscription() throws ExecutionException, InterruptedExce @Test public void testTransactionBufferMetrics() throws Exception { + this.cleanup(); + this.setup(); //Test commit for (int i = 0; i < partitions; i++) { String topic = 
partitionedTopicName.getPartition(i).toString(); @@ -253,14 +258,21 @@ public void testTransactionBufferMetrics() throws Exception { assertEquals(pending.size(), 1); } + /** + * This is a flaky test. + */ @Test public void testTransactionBufferClientTimeout() throws Exception { PulsarService pulsarService = pulsarServiceList.get(0); - PulsarClient mockClient = mock(PulsarClientImpl.class); + PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); CompletableFuture completableFuture = new CompletableFuture<>(); ClientCnx clientCnx = mock(ClientCnx.class); completableFuture.complete(clientCnx); when(((PulsarClientImpl)mockClient).getConnection(anyString())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(anyString(), anyInt())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(any(), any(), anyInt())).thenReturn(completableFuture); ChannelHandlerContext cnx = mock(ChannelHandlerContext.class); when(clientCnx.ctx()).thenReturn(cnx); Channel channel = mock(Channel.class); @@ -287,7 +299,9 @@ public PulsarClient answer(InvocationOnMock invocation) throws Throwable { ConcurrentSkipListMap outstandingRequests = (ConcurrentSkipListMap) field.get(transactionBufferHandler); - assertEquals(outstandingRequests.size(), 1); + Awaitility.await().atMost(1, TimeUnit.SECONDS).untilAsserted(() -> { + assertEquals(outstandingRequests.size(), 1); + }); Awaitility.await().atLeast(2, TimeUnit.SECONDS).until(() -> { if (outstandingRequests.size() == 0) { @@ -307,11 +321,13 @@ public PulsarClient answer(InvocationOnMock invocation) throws Throwable { @Test public void testTransactionBufferChannelUnActive() throws PulsarServerException { PulsarService pulsarService = pulsarServiceList.get(0); - PulsarClient mockClient = mock(PulsarClientImpl.class); + PulsarClientImpl mockClient = 
mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); CompletableFuture completableFuture = new CompletableFuture<>(); ClientCnx clientCnx = mock(ClientCnx.class); completableFuture.complete(clientCnx); - when(((PulsarClientImpl)mockClient).getConnection(anyString())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(anyString(), anyInt())).thenReturn(completableFuture); ChannelHandlerContext cnx = mock(ChannelHandlerContext.class); when(clientCnx.ctx()).thenReturn(cnx); Channel channel = mock(Channel.class); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java index d6ec092c4456f..278cdbac1f09d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java @@ -24,6 +24,7 @@ import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferHandlerImpl; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; @@ -31,8 +32,8 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; -import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.api.proto.TxnAction; import 
org.apache.pulsar.common.naming.NamespaceBundle; @@ -46,7 +47,9 @@ public class TransactionBufferHandlerImplTest { @Test public void testRequestCredits() throws PulsarServerException { - PulsarClient pulsarClient = mock(PulsarClientImpl.class); + PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); PulsarService pulsarService = mock(PulsarService.class); NamespaceService namespaceService = mock(NamespaceService.class); when(pulsarService.getNamespaceService()).thenReturn(namespaceService); @@ -54,7 +57,10 @@ public void testRequestCredits() throws PulsarServerException { when(namespaceService.getBundleAsync(any())).thenReturn(CompletableFuture.completedFuture(mock(NamespaceBundle.class))); Optional opData = Optional.empty(); when(namespaceService.getOwnerAsync(any())).thenReturn(CompletableFuture.completedFuture(opData)); - when(((PulsarClientImpl)pulsarClient).getConnection(anyString())).thenReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))); + when(((PulsarClientImpl)pulsarClient).getConnection(anyString(), anyInt())) + .thenReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))); + when(((PulsarClientImpl)pulsarClient).getConnection(anyString())) + .thenReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))); TransactionBufferHandlerImpl handler = spy(new TransactionBufferHandlerImpl(pulsarService, null, 1000, 3000)); doNothing().when(handler).endTxn(any()); doReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))).when(handler).getClientCnx(anyString()); @@ -75,7 +81,9 @@ public void testRequestCredits() throws PulsarServerException { @Test public void testMinRequestCredits() throws PulsarServerException { - PulsarClient pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + 
when(pulsarClient.getCnxPool()).thenReturn(connectionPool); PulsarService pulsarService = mock(PulsarService.class); when(pulsarService.getClient()).thenReturn(pulsarClient); TransactionBufferHandlerImpl handler = spy(new TransactionBufferHandlerImpl(pulsarService, null, 50, 3000)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java index 7493b25ac1d90..6dcf7931d73f4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java @@ -213,14 +213,14 @@ public void testSyncNormalPositionWhenTBRecover(boolean clientEnableTransaction, position = topicTransactionBuffer.getMaxReadPosition(); assertEquals(position, PositionImpl.EARLIEST); + // change to None state can recover + field.set(topicTransactionBuffer, TopicTransactionBufferState.State.None); + // invoke recover Method method = TopicTransactionBuffer.class.getDeclaredMethod("recover"); method.setAccessible(true); method.invoke(topicTransactionBuffer); - // change to None state can recover - field.set(topicTransactionBuffer, TopicTransactionBufferState.State.None); - // recover success again checkTopicTransactionBufferState(clientEnableTransaction, topicTransactionBuffer); @@ -232,13 +232,15 @@ public void testSyncNormalPositionWhenTBRecover(boolean clientEnableTransaction, private void checkTopicTransactionBufferState(boolean clientEnableTransaction, TopicTransactionBuffer topicTransactionBuffer) { // recover success - Awaitility.await().until(() -> { + Awaitility.await().untilAsserted(() -> { if (clientEnableTransaction) { // recover success, client enable transaction will change to Ready State - return topicTransactionBuffer.getStats(false, 
false).state.equals(Ready.name()); + assertEquals(topicTransactionBuffer.getStats(false, false).state, + Ready.name()); } else { // recover success, client disable transaction will change to NoSnapshot State - return topicTransactionBuffer.getStats(false, false).state.equals(NoSnapshot.name()); + assertEquals(topicTransactionBuffer.getStats(false, false).state, + NoSnapshot.name()); } }); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java index 8189f8e86b5b3..3bd8b920a30fe 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java @@ -49,6 +49,7 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AuthAction; import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.zookeeper.KeeperException.Code; import org.awaitility.Awaitility; @@ -497,4 +498,48 @@ public void testCleanupEmptyTopicAuthenticationMap() throws Exception { .get().auth_policies.getTopicAuthentication().containsKey(topic)); }); } + + @Test + public void testCleanupEmptySubscriptionAuthenticationMap() throws Exception { + Map authParams = new HashMap<>(); + authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); + authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); + Authentication authTls = new AuthenticationTls(); + authTls.configure(authParams); + internalSetup(authTls); + + admin.clusters().createCluster("test", ClusterData.builder().build()); + admin.tenants().createTenant("p1", + new TenantInfoImpl(Collections.emptySet(), new HashSet<>(admin.clusters().getClusters()))); + String 
namespace = "p1/ns1"; + admin.namespaces().createNamespace("p1/ns1"); + + // grant permission1 and permission2 + String subscription = "test-sub-1"; + String role1 = "test-user-1"; + String role2 = "test-user-2"; + Set roles = new HashSet<>(); + roles.add(role1); + roles.add(role2); + admin.namespaces().grantPermissionOnSubscription(namespace, subscription, roles); + Optional policies = pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get(namespace)); + assertTrue(policies.isPresent()); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().containsKey(subscription)); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role1)); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role2)); + + // revoke permission1 + admin.namespaces().revokePermissionOnSubscription(namespace, subscription, role1); + policies = pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get(namespace)); + assertTrue(policies.isPresent()); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().containsKey(subscription)); + assertFalse(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role1)); + assertTrue(policies.get().auth_policies.getSubscriptionAuthentication().get(subscription).contains(role2)); + + // revoke permission2 + admin.namespaces().revokePermissionOnSubscription(namespace, subscription, role2); + policies = pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get(namespace)); + assertTrue(policies.isPresent()); + assertFalse(policies.get().auth_policies.getSubscriptionAuthentication().containsKey(subscription)); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java index e3bd321d76332..042c9b328d58b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java @@ -30,6 +30,7 @@ import org.apache.pulsar.client.impl.auth.AuthenticationTls; import org.apache.pulsar.common.tls.PublicSuffixMatcher; import org.apache.pulsar.common.tls.TlsHostnameVerifier; +import org.assertj.core.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; @@ -141,6 +142,7 @@ public void testTlsSyncProducerAndConsumerWithInvalidBrokerHost(boolean hostname // setup broker cert which has CN = "pulsar" different than broker's hostname="localhost" conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); + conf.setAuthenticationProviders(Sets.newTreeSet(AuthenticationProviderTls.class.getName())); conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setTlsCertificateFilePath(TLS_MIM_SERVER_CERT_FILE_PATH); conf.setTlsKeyFilePath(TLS_MIM_SERVER_KEY_FILE_PATH); @@ -182,6 +184,7 @@ public void testTlsSyncProducerAndConsumerCorrectBrokerHost() throws Exception { // setup broker cert which has CN = "localhost" conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); + conf.setAuthenticationProviders(Sets.newTreeSet(AuthenticationProviderTls.class.getName())); conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java index bdbe8efc8e6ef..9a36e0683b422 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java @@ -119,6 +119,7 @@ protected void cleanup() throws Exception { public void testProducerAndConsumerAuthorization() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationProvider(TestAuthorizationProvider.class.getName()); setup(); @@ -179,6 +180,7 @@ public void testProducerAndConsumerAuthorization() throws Exception { public void testSubscriberPermission() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setEnablePackagesManagement(true); conf.setPackagesManagementStorageProvider(MockedPackagesStorageProvider.class.getName()); conf.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); @@ -369,6 +371,7 @@ public void testSubscriberPermission() throws Exception { public void testClearBacklogPermission() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); setup(); @@ -610,6 +613,7 @@ public void testUpdateTopicPropertiesAuthorization() throws Exception { public void testSubscriptionPrefixAuthorization() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationProvider(TestAuthorizationProviderWithSubscriptionPrefix.class.getName()); setup(); @@ -749,6 +753,7 @@ public void testAuthData() throws Exception { public void testPermissionForProducerCreateInitialSubscription() throws Exception { log.info("-- Starting {} test --", methodName); cleanup(); + conf.setTopicLevelPoliciesEnabled(false); 
conf.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); setup(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java index 792f419ee997e..0a4c5b7a318b3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java @@ -27,10 +27,12 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; +import com.github.benmanes.caffeine.cache.AsyncLoadingCache; import com.google.common.collect.Sets; import com.google.common.util.concurrent.MoreExecutors; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; +import io.prometheus.client.CollectorRegistry; import java.io.IOException; import java.io.InputStream; import java.lang.reflect.Field; @@ -69,9 +71,17 @@ import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl; import org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerWrapper; import org.apache.pulsar.broker.loadbalance.impl.SimpleResourceUnit; +import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.broker.namespace.OwnedBundle; +import org.apache.pulsar.broker.namespace.OwnershipCache; +import org.apache.pulsar.broker.namespace.ServiceUnitUtils; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.apache.pulsar.client.impl.BinaryProtoLookupService; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.LookupService; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.NamespaceBundle; import 
org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.ServiceUnitId; @@ -83,6 +93,7 @@ import org.apache.pulsar.common.util.SecurityUtility; import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; +import org.apache.zookeeper.KeeperException; import org.asynchttpclient.AsyncCompletionHandler; import org.asynchttpclient.AsyncHttpClient; import org.asynchttpclient.AsyncHttpClientConfig; @@ -94,6 +105,7 @@ import org.asynchttpclient.Response; import org.asynchttpclient.channel.DefaultKeepAliveStrategy; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; @@ -161,7 +173,7 @@ public void testMultipleBrokerLookup() throws Exception { // mock: return Broker2 as a Least-loaded broker when leader receives request [3] doReturn(true).when(loadManager1).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager1)); @@ -294,7 +306,7 @@ public void testMultipleBrokerDifferentClusterLookup() throws Exception { // mock: return Broker2 as a Least-loaded broker when leader receives request doReturn(true).when(loadManager2).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar2.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); 
loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager2)); /**** started broker-2 ****/ @@ -474,7 +486,7 @@ public void testWebserviceServiceTls() throws Exception { // request [3] doReturn(true).when(loadManager1).isCentralized(); doReturn(true).when(loadManager2).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); doReturn(Optional.of(resourceUnit)).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); @@ -507,6 +519,9 @@ public void testWebserviceServiceTls() throws Exception { loadManager1 = null; loadManager2 = null; + + conf.setBrokerServicePortTls(Optional.empty()); + conf.setWebServicePortTls(Optional.empty()); } /** @@ -565,7 +580,7 @@ public void testSplitUnloadLookupTest() throws Exception { loadManagerField.set(pulsar2.getNamespaceService(), new AtomicReference<>(loadManager2)); // mock: return Broker1 as a Least-loaded broker when leader receives request [3] doReturn(true).when(loadManager1).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); doReturn(Optional.of(resourceUnit)).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); doReturn(Optional.of(resourceUnit)).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); loadManagerField.set(pulsar.getNamespaceService(), new AtomicReference<>(loadManager1)); @@ -680,7 +695,7 @@ public void testModularLoadManagerSplitBundle() throws Exception { loadManagerField.set(pulsar2.getNamespaceService(), new AtomicReference<>(loadManager2)); // mock: return Broker1 as a Least-loaded broker when leader receives request [3] 
doReturn(true).when(loadManager1).isCentralized(); - SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getSafeWebServiceAddress(), null); + SimpleResourceUnit resourceUnit = new SimpleResourceUnit(pulsar.getBrokerId(), null); Optional res = Optional.of(resourceUnit); doReturn(res).when(loadManager1).getLeastLoaded(any(ServiceUnitId.class)); doReturn(res).when(loadManager2).getLeastLoaded(any(ServiceUnitId.class)); @@ -746,7 +761,7 @@ public void testModularLoadManagerSplitBundle() throws Exception { }); // Unload the NamespacePolicies and AntiAffinity check. - String currentBroker = String.format("%s:%d", "localhost", pulsar.getListenPortHTTP().get()); + String currentBroker = pulsar.getBrokerId(); assertTrue(loadManager.shouldNamespacePoliciesUnload(namespace,"0x00000000_0xffffffff", currentBroker)); assertTrue(loadManager.shouldAntiAffinityNamespaceUnload(namespace,"0x00000000_0xffffffff", currentBroker)); @@ -823,6 +838,104 @@ public void testSkipSplitBundleIfOnlyOneBroker() throws Exception { } } + @Test + public void testMergeGetPartitionedMetadataRequests() throws Exception { + // Assert the lookup service is a "BinaryProtoLookupService". + final PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + final LookupService lookupService = pulsarClientImpl.getLookup(); + assertTrue(lookupService instanceof BinaryProtoLookupService); + + final String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final int topicPartitions = 10; + admin.topics().createPartitionedTopic(tpName, topicPartitions); + + // Verify the request is works after merge the requests. + List> futures = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + futures.add(lookupService.getPartitionedTopicMetadata(TopicName.get(tpName))); + } + for (CompletableFuture future : futures) { + assertEquals(future.join().partitions, topicPartitions); + } + + // cleanup. 
+ admin.topics().deletePartitionedTopic(tpName); + } + + @Test + public void testMergeLookupRequests() throws Exception { + // Assert the lookup service is a "BinaryProtoLookupService". + final PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + final LookupService lookupService = pulsarClientImpl.getLookup(); + assertTrue(lookupService instanceof BinaryProtoLookupService); + + final String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(tpName); + + // Create 1 producer and 100 consumers. + List> producers = new ArrayList<>(); + List> consumers = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + producers.add(pulsarClient.newProducer(Schema.STRING).topic(tpName).create()); + } + for (int i = 0; i < 20; i++) { + consumers.add(pulsarClient.newConsumer(Schema.STRING).topic(tpName).subscriptionName("s" + i).subscribe()); + } + + // Verify the lookup count will be smaller than before improve. + int lookupCountBeforeUnload = calculateLookupRequestCount(); + admin.namespaces().unload(TopicName.get(tpName).getNamespace()); + Awaitility.await().untilAsserted(() -> { + for (Producer p : producers) { + assertEquals(WhiteboxImpl.getInternalState(p, "state").toString(), "Ready"); + } + for (Consumer c : consumers) { + assertEquals(WhiteboxImpl.getInternalState(c, "state").toString(), "Ready"); + } + }); + int lookupCountAfterUnload = calculateLookupRequestCount(); + log.info("lookup count before unload: {}, after unload: {}", lookupCountBeforeUnload, lookupCountAfterUnload); + assertTrue(lookupCountAfterUnload < lookupCountBeforeUnload * 2, + "the lookup count should be smaller than before improve"); + + // Verify the producers and consumers is still works. 
+ List messagesSent = new ArrayList<>(); + int index = 0; + for (Producer producer: producers) { + String message = Integer.valueOf(index++).toString(); + producer.send(message); + messagesSent.add(message); + } + HashSet messagesReceived = new HashSet<>(); + for (Consumer consumer : consumers) { + while (true) { + Message msg = consumer.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messagesReceived.add(msg.getValue()); + } + } + assertEquals(messagesReceived.size(), producers.size()); + + // cleanup. + for (Producer producer: producers) { + producer.close(); + } + for (Consumer consumer : consumers) { + consumer.close(); + } + admin.topics().delete(tpName); + } + + private int calculateLookupRequestCount() throws Exception { + int failures = CollectorRegistry.defaultRegistry.getSampleValue("pulsar_broker_lookup_failures_total") + .intValue(); + int answers = CollectorRegistry.defaultRegistry.getSampleValue("pulsar_broker_lookup_answers_total") + .intValue(); + return failures + answers; + } + @Test(timeOut = 10000) public void testPartitionedMetadataWithDeprecatedVersion() throws Exception { @@ -840,6 +953,8 @@ public void testPartitionedMetadataWithDeprecatedVersion() throws Exception { admin.topics().createPartitionedTopic(dest.toString(), totalPartitions); stopBroker(); + conf.setBrokerServicePortTls(Optional.empty()); + conf.setWebServicePortTls(Optional.empty()); conf.setClientLibraryVersionCheckEnabled(true); startBroker(); @@ -997,4 +1112,139 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat return "invalid"; } } + + @Test + public void testLookupConnectionNotCloseIfGetUnloadingExOrMetadataEx() throws Exception { + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(tpName); + PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + Producer producer = pulsarClientImpl.newProducer(Schema.STRING).topic(tpName).create(); + 
Consumer consumer = pulsarClientImpl.newConsumer(Schema.STRING).topic(tpName) + .subscriptionName("s1").isAckReceiptEnabled(true).subscribe(); + LookupService lookupService = pulsarClientImpl.getLookup(); + assertTrue(lookupService instanceof BinaryProtoLookupService); + ClientCnx lookupConnection = pulsarClientImpl.getCnxPool().getConnection(lookupService.resolveHost()).join(); + + // Verify the socket will not be closed if the bundle is unloading. + BundleOfTopic bundleOfTopic = new BundleOfTopic(tpName); + bundleOfTopic.setBundleIsUnloading(); + try { + lookupService.getBroker(TopicName.get(tpName)).get(); + fail("It should failed due to the namespace bundle is unloading."); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("is being unloaded")); + } + // Do unload topic, trigger producer & consumer reconnection. + pulsar.getBrokerService().getTopic(tpName, false).join().get().close(true); + assertTrue(lookupConnection.ctx().channel().isActive()); + bundleOfTopic.setBundleIsNotUnloading(); + // Assert producer & consumer could reconnect successful. + producer.send("1"); + HashSet messagesReceived = new HashSet<>(); + while (true) { + Message msg = consumer.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messagesReceived.add(msg.getValue()); + } + assertTrue(messagesReceived.contains("1")); + + // Verify the socket will not be closed if get a metadata ex. + bundleOfTopic.releaseBundleLockAndMakeAcquireFail(); + try { + lookupService.getBroker(TopicName.get(tpName)).get(); + fail("It should failed due to the acquire bundle lock fail."); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("OperationTimeout")); + } + // Do unload topic, trigger producer & consumer reconnection. + pulsar.getBrokerService().getTopic(tpName, false).join().get().close(true); + assertTrue(lookupConnection.ctx().channel().isActive()); + bundleOfTopic.makeAcquireBundleLockSuccess(); + // Assert producer could reconnect successful. 
+ producer.send("2"); + while (true) { + Message msg = consumer.receive(2, TimeUnit.SECONDS); + if (msg == null) { + break; + } + messagesReceived.add(msg.getValue()); + } + assertTrue(messagesReceived.contains("2")); + + // cleanup. + producer.close(); + consumer.close(); + admin.topics().delete(tpName); + } + + private class BundleOfTopic { + + private NamespaceBundle namespaceBundle; + private OwnershipCache ownershipCache; + private AsyncLoadingCache ownedBundlesCache; + + public BundleOfTopic(String tpName) { + namespaceBundle = pulsar.getNamespaceService().getBundle(TopicName.get(tpName)); + ownershipCache = pulsar.getNamespaceService().getOwnershipCache(); + ownedBundlesCache = WhiteboxImpl.getInternalState(ownershipCache, "ownedBundlesCache"); + } + + private void setBundleIsUnloading() { + ownedBundlesCache.get(namespaceBundle).join().setActive(false); + } + + private void setBundleIsNotUnloading() { + ownedBundlesCache.get(namespaceBundle).join().setActive(true); + } + + private void releaseBundleLockAndMakeAcquireFail() throws Exception { + ownedBundlesCache.synchronous().invalidateAll(); + mockZooKeeper.delete(ServiceUnitUtils.path(namespaceBundle), -1); + mockZooKeeper.setAlwaysFail(KeeperException.Code.OPERATIONTIMEOUT); + } + + private void makeAcquireBundleLockSuccess() throws Exception { + mockZooKeeper.unsetAlwaysFail(); + } + } + + @Test(timeOut = 30000) + public void testLookupConnectionNotCloseIfFailedToAcquireOwnershipOfBundle() throws Exception { + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(tpName); + final var pulsarClientImpl = (PulsarClientImpl) pulsarClient; + final var cache = pulsar.getNamespaceService().getOwnershipCache(); + final var bundle = pulsar.getNamespaceService().getBundle(TopicName.get(tpName)); + final var value = cache.getOwnerAsync(bundle).get().orElse(null); + assertNotNull(value); + + cache.invalidateLocalOwnerCache(); + final var lock = 
pulsar.getCoordinationService().getLockManager(NamespaceEphemeralData.class) + .acquireLock(ServiceUnitUtils.path(bundle), new NamespaceEphemeralData()).join(); + lock.updateValue(null); + log.info("Updated bundle {} with null", bundle.getBundleRange()); + + // wait for the system topic reader to __change_events is closed, otherwise the test will be affected + Thread.sleep(500); + + final var future = pulsarClientImpl.getLookup().getBroker(TopicName.get(tpName)); + final var cnx = pulsarClientImpl.getCnxPool().getConnections().stream().findAny() + .map(CompletableFuture::join).orElse(null); + assertNotNull(cnx); + + try { + future.get(); + fail(); + } catch (ExecutionException e) { + log.info("getBroker failed with {}: {}", e.getCause().getClass().getName(), e.getMessage()); + assertTrue(e.getCause() instanceof PulsarClientException.BrokerMetadataException); + assertTrue(cnx.ctx().channel().isActive()); + lock.updateValue(value); + lock.release(); + assertTrue(e.getMessage().contains("Failed to acquire ownership")); + pulsarClientImpl.getLookup().getBroker(TopicName.get(tpName)).get(); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java index c9b243257c4e1..d716d5a806392 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java @@ -22,6 +22,7 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.expectThrows; import java.util.HashSet; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -50,7 +51,8 @@ protected void doInitConf() throws Exception { Set providers = new HashSet<>(); providers.add(AuthenticationProviderTls.class.getName()); conf.setAuthenticationProviders(providers); - + 
conf.setWebServicePortTls(Optional.of(0)); + conf.setBrokerServicePortTls(Optional.of(0)); conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java index 5e3731fbf24fc..2a0cb3187d208 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java @@ -29,6 +29,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; @@ -46,9 +47,10 @@ import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -@Test(groups = "flaky") +@Test(groups = "broker-impl") public class DeadLetterTopicTest extends ProducerConsumerBase { private static final Logger log = LoggerFactory.getLogger(DeadLetterTopicTest.class); @@ -56,6 +58,7 @@ public class DeadLetterTopicTest extends ProducerConsumerBase { @BeforeMethod(alwaysRun = true) @Override protected void setup() throws Exception { + this.conf.setMaxMessageSize(5 * 1024); super.internalSetup(); super.producerBaseSetup(); } @@ -66,6 +69,15 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + private String createMessagePayload(int size) { + StringBuilder str = new StringBuilder(); + Random rand = new Random(); + for (int i = 0; i < size; i++) { + str.append(rand.nextInt(10)); + } + return str.toString(); + } + @Test public void testDeadLetterTopicWithMessageKey() throws Exception { final String topic = 
"persistent://my-property/my-ns/dead-letter-topic"; @@ -125,9 +137,13 @@ public void testDeadLetterTopicWithMessageKey() throws Exception { consumer.close(); } + @DataProvider(name = "produceLargeMessages") + public Object[][] produceLargeMessages() { + return new Object[][] { { false }, { true } }; + } - @Test(groups = "quarantine") - public void testDeadLetterTopic() throws Exception { + @Test(dataProvider = "produceLargeMessages") + public void testDeadLetterTopic(boolean produceLargeMessages) throws Exception { final String topic = "persistent://my-property/my-ns/dead-letter-topic"; final int maxRedeliveryCount = 2; @@ -154,28 +170,44 @@ public void testDeadLetterTopic() throws Exception { Producer producer = pulsarClient.newProducer(Schema.BYTES) .topic(topic) + .enableChunking(produceLargeMessages) + .enableBatching(!produceLargeMessages) .create(); + Map messageContent = new HashMap<>(); + for (int i = 0; i < sendMessages; i++) { - producer.send(String.format("Hello Pulsar [%d]", i).getBytes()); + String data; + if (!produceLargeMessages) { + data = String.format("Hello Pulsar [%d]", i); + } else { + data = createMessagePayload(1024 * 10); + } + producer.newMessage().key(String.valueOf(i)).value(data.getBytes()).send(); + messageContent.put(i, data); } producer.close(); int totalReceived = 0; do { - Message message = consumer.receive(); - log.info("consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + Message message = consumer.receive(5, TimeUnit.SECONDS); + assertNotNull(message, "The consumer should be able to receive messages."); + log.info("consumer received message : {}", message.getMessageId()); totalReceived++; } while (totalReceived < sendMessages * (maxRedeliveryCount + 1)); int totalInDeadLetter = 0; do { - Message message = deadLetterConsumer.receive(); - log.info("dead letter consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + Message message = 
deadLetterConsumer.receive(5, TimeUnit.SECONDS); + assertNotNull(message, "the deadLetterConsumer should receive messages."); + assertEquals(new String(message.getData()), messageContent.get(Integer.parseInt(message.getKey()))); + messageContent.remove(Integer.parseInt(message.getKey())); + log.info("dead letter consumer received message : {}", message.getMessageId()); deadLetterConsumer.acknowledge(message); totalInDeadLetter++; } while (totalInDeadLetter < sendMessages); + assertTrue(messageContent.isEmpty()); deadLetterConsumer.close(); consumer.close(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java index fc103a46027c0..bd0119823fd95 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java @@ -692,8 +692,8 @@ public void testBlockBrokerDispatching() { try { final int waitMills = 500; final int maxUnAckPerBroker = 200; - final double unAckMsgPercentagePerDispatcher = 10; - int maxUnAckPerDispatcher = (int) ((maxUnAckPerBroker * unAckMsgPercentagePerDispatcher) / 100); // 200 * + final double unAckMsgPercentagePerDispatcher = 0.1; + int maxUnAckPerDispatcher = (int) (maxUnAckPerBroker * unAckMsgPercentagePerDispatcher); // 200 * // 10% = 20 // messages pulsar.getConfiguration().setMaxUnackedMessagesPerBroker(maxUnAckPerBroker); @@ -907,8 +907,8 @@ public void testBrokerDispatchBlockAndSubAckBackRequiredMsgs() { .getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked(); try { final int maxUnAckPerBroker = 200; - final double unAckMsgPercentagePerDispatcher = 10; - int maxUnAckPerDispatcher = (int) ((maxUnAckPerBroker * unAckMsgPercentagePerDispatcher) / 100); // 200 * + final double unAckMsgPercentagePerDispatcher = 0.1; + int maxUnAckPerDispatcher = (int) (maxUnAckPerBroker * 
unAckMsgPercentagePerDispatcher); // 200 * // 10% = 20 // messages pulsar.getConfiguration().setMaxUnackedMessagesPerBroker(maxUnAckPerBroker); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/InjectedClientCnxClientBuilder.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/InjectedClientCnxClientBuilder.java new file mode 100644 index 0000000000000..d29dd4f7061b8 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/InjectedClientCnxClientBuilder.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.api; + +import io.netty.channel.EventLoopGroup; +import java.util.concurrent.ThreadFactory; +import org.apache.pulsar.client.impl.ClientBuilderImpl; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.ConnectionPool; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.client.util.ExecutorProvider; +import org.apache.pulsar.common.util.netty.EventLoopUtil; + +public class InjectedClientCnxClientBuilder { + + public static PulsarClientImpl create(final ClientBuilderImpl clientBuilder, + final ClientCnxFactory clientCnxFactory) throws Exception { + ClientConfigurationData conf = clientBuilder.getClientConfigurationData(); + ThreadFactory threadFactory = new ExecutorProvider + .ExtendedThreadFactory("pulsar-client-io", Thread.currentThread().isDaemon()); + EventLoopGroup eventLoopGroup = + EventLoopUtil.newEventLoopGroup(conf.getNumIoThreads(), conf.isEnableBusyWait(), threadFactory); + + // Inject into ClientCnx. 
+ ConnectionPool pool = new ConnectionPool(conf, eventLoopGroup, + () -> clientCnxFactory.generate(conf, eventLoopGroup)); + + return new PulsarClientImpl(conf, eventLoopGroup, pool); + } + + public interface ClientCnxFactory { + + ClientCnx generate(ClientConfigurationData conf, EventLoopGroup eventLoopGroup); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java index 315ce378d6953..bb8bab29ad9ef 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java @@ -371,4 +371,36 @@ public void testMultipleIOThreads() throws PulsarAdminException, PulsarClientExc assertTrue(consumer instanceof MultiTopicsConsumerImpl); assertTrue(consumer.isConnected()); } + + @Test(timeOut = 30000) + public void testSubscriptionNotFound() throws PulsarAdminException, PulsarClientException { + final var topic1 = newTopicName(); + final var topic2 = newTopicName(); + + pulsar.getConfiguration().setAllowAutoSubscriptionCreation(false); + + try { + final var singleTopicConsumer = pulsarClient.newConsumer() + .topic(topic1) + .subscriptionName("sub-1") + .isAckReceiptEnabled(true) + .subscribe(); + assertTrue(singleTopicConsumer instanceof ConsumerImpl); + } catch (Throwable t) { + assertTrue(t.getCause().getCause() instanceof PulsarClientException.SubscriptionNotFoundException); + } + + try { + final var multiTopicsConsumer = pulsarClient.newConsumer() + .topics(List.of(topic1, topic2)) + .subscriptionName("sub-2") + .isAckReceiptEnabled(true) + .subscribe(); + assertTrue(multiTopicsConsumer instanceof MultiTopicsConsumerImpl); + } catch (Throwable t) { + assertTrue(t.getCause().getCause() instanceof PulsarClientException.SubscriptionNotFoundException); + } + + 
pulsar.getConfiguration().setAllowAutoSubscriptionCreation(true); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java index 2fc8aebf64a4a..81d65b192049b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MutualAuthenticationTest.java @@ -195,7 +195,7 @@ protected void setup() throws Exception { Set superUserRoles = new HashSet<>(); superUserRoles.add("admin"); conf.setSuperUserRoles(superUserRoles); - + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthorizationEnabled(true); conf.setAuthenticationEnabled(true); Set providersClassNames = Sets.newHashSet(MutualAuthenticationProvider.class.getName()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/OrphanPersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/OrphanPersistentTopicTest.java new file mode 100644 index 0000000000000..7cd9da7574dbb --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/OrphanPersistentTopicTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.api; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import java.lang.reflect.Field; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.TopicPoliciesService; +import org.apache.pulsar.broker.service.TopicPolicyListener; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.TopicPolicies; +import org.apache.pulsar.compaction.CompactionServiceFactory; +import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-api") +public class OrphanPersistentTopicTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testNoOrphanTopicAfterCreateTimeout() throws Exception { + // Make the topic loading timeout faster. 
+ int topicLoadTimeoutSeconds = 2; + long originalTopicLoadTimeoutSeconds = pulsar.getConfig().getTopicLoadTimeoutSeconds(); + pulsar.getConfig().setTopicLoadTimeoutSeconds(2); + + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + String mlPath = BrokerService.MANAGED_LEDGER_PATH_ZNODE + "/" + TopicName.get(tpName).getPersistenceNamingEncoding(); + + // Make topic load timeout 5 times. + AtomicInteger timeoutCounter = new AtomicInteger(); + for (int i = 0; i < 5; i++) { + mockZooKeeper.delay(topicLoadTimeoutSeconds * 2 * 1000, (op, path) -> { + if (mlPath.equals(path)) { + log.info("Topic load timeout: " + timeoutCounter.incrementAndGet()); + return true; + } + return false; + }); + } + + // Load topic. + CompletableFuture> consumer = pulsarClient.newConsumer() + .topic(tpName) + .subscriptionName("my-sub") + .subscribeAsync(); + + // After create timeout 5 times, the topic will be created successful. + Awaitility.await().ignoreExceptions().atMost(40, TimeUnit.SECONDS).untilAsserted(() -> { + CompletableFuture> future = pulsar.getBrokerService().getTopic(tpName, false); + assertTrue(future.isDone()); + Optional optional = future.get(); + assertTrue(optional.isPresent()); + }); + + // Assert only one PersistentTopic was not closed. + TopicPoliciesService topicPoliciesService = pulsar.getTopicPoliciesService(); + Map>> listeners = + WhiteboxImpl.getInternalState(topicPoliciesService, "listeners"); + assertEquals(listeners.get(TopicName.get(tpName)).size(), 1); + + // cleanup. + consumer.join().close(); + admin.topics().delete(tpName, false); + pulsar.getConfig().setTopicLoadTimeoutSeconds(originalTopicLoadTimeoutSeconds); + } + + @Test + public void testNoOrphanTopicIfInitFailed() throws Exception { + String tpName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + admin.topics().createNonPartitionedTopic(tpName); + + // Load topic. 
+ Consumer consumer = pulsarClient.newConsumer() + .topic(tpName) + .subscriptionName("my-sub") + .subscribe(); + + // Make the method `PersitentTopic.initialize` fail. + Field fieldCompactionServiceFactory = PulsarService.class.getDeclaredField("compactionServiceFactory"); + fieldCompactionServiceFactory.setAccessible(true); + CompactionServiceFactory compactionServiceFactory = + (CompactionServiceFactory) fieldCompactionServiceFactory.get(pulsar); + fieldCompactionServiceFactory.set(pulsar, null); + admin.topics().unload(tpName); + + // Wait for failed to create topic for several times. + Thread.sleep(5 * 1000); + + // Remove the injected error, the topic will be created successful. + fieldCompactionServiceFactory.set(pulsar, compactionServiceFactory); + // We do not know the next time of consumer reconnection, so wait for 2 minutes to avoid flaky. It will be + // very fast in normal. + Awaitility.await().ignoreExceptions().atMost(120, TimeUnit.SECONDS).untilAsserted(() -> { + CompletableFuture> future = pulsar.getBrokerService().getTopic(tpName, false); + assertTrue(future.isDone()); + Optional optional = future.get(); + assertTrue(optional.isPresent()); + }); + + // Assert only one PersistentTopic was not closed. + TopicPoliciesService topicPoliciesService = pulsar.getTopicPoliciesService(); + Map>> listeners = + WhiteboxImpl.getInternalState(topicPoliciesService, "listeners"); + assertEquals(listeners.get(TopicName.get(tpName)).size(), 1); + + // cleanup. 
+ consumer.close(); + admin.topics().delete(tpName, false); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerDisallowAutoCreateTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerDisallowAutoCreateTopicTest.java new file mode 100644 index 0000000000000..728e556f0224a --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerDisallowAutoCreateTopicTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.api; + +import static org.apache.pulsar.client.util.RetryMessageUtil.RETRY_GROUP_TOPIC_SUFFIX; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.BrokerTestUtil; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-api") +public class SimpleProducerConsumerDisallowAutoCreateTopicTest extends ProducerConsumerBase { + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + conf.setAllowAutoTopicCreation(false); + } + + @Test + public void testClearErrorIfRetryTopicNotExists() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp_"); + final String subName = "sub"; + final String retryTopicName = topicName + "-" + subName + RETRY_GROUP_TOPIC_SUFFIX; + admin.topics().createNonPartitionedTopic(topicName); + Consumer consumer = null; + try { + consumer = pulsarClient.newConsumer() + .topic(topicName) + .subscriptionName(subName) + .enableRetry(true) + .subscribe(); + fail(""); + } catch (Exception ex) { + log.info("got an expected error", ex); + assertTrue(ex.getMessage().contains("Not found:")); + assertTrue(ex.getMessage().contains(retryTopicName)); + } finally { + // cleanup. 
+ if (consumer != null) { + consumer.close(); + } + admin.topics().delete(topicName); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java index 87f12e6acdcb2..4d5e7deaf7d99 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenAuthenticatedProducerConsumerTest.java @@ -37,6 +37,7 @@ import java.util.concurrent.TimeUnit; import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.slf4j.Logger; @@ -92,6 +93,8 @@ protected void setup() throws Exception { Set providers = new HashSet<>(); providers.add(AuthenticationProviderToken.class.getName()); conf.setAuthenticationProviders(providers); + conf.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + conf.setBrokerClientAuthenticationParameters("token:" + ADMIN_TOKEN); conf.setClusterName("test"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java index fdf41c4a6ada1..ba43ee6d6a2dd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/TokenOauth2AuthenticatedProducerConsumerTest.java @@ -27,7 +27,9 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; +import java.util.HashMap; import java.util.HashSet; +import 
java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -39,6 +41,7 @@ import org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -87,11 +90,12 @@ protected void setup() throws Exception { conf.setAuthenticationProviders(providers); conf.setBrokerClientAuthenticationPlugin(AuthenticationOAuth2.class.getName()); - conf.setBrokerClientAuthenticationParameters("{\n" - + " \"privateKey\": \"" + CREDENTIALS_FILE + "\",\n" - + " \"issuerUrl\": \"" + server.getIssuer() + "\",\n" - + " \"audience\": \"" + audience + "\",\n" - + "}\n"); + final Map oauth2Param = new HashMap<>(); + oauth2Param.put("privateKey", CREDENTIALS_FILE); + oauth2Param.put("issuerUrl", server.getIssuer()); + oauth2Param.put("audience", audience); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory + .getMapper().getObjectMapper().writeValueAsString(oauth2Param)); conf.setClusterName("test"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java index fe0aa4dd4953b..79ffada4a90c8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java @@ -31,9 +31,13 @@ import java.util.function.Supplier; import java.util.stream.IntStream; import io.netty.util.concurrent.Promise; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import 
org.apache.pulsar.common.api.proto.CommandCloseProducer; import org.apache.pulsar.common.util.netty.EventLoopUtil; +import org.awaitility.Awaitility; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterClass; @@ -80,6 +84,36 @@ public void testSingleIpAddress() throws Exception { eventLoop.shutdownGracefully(); } + @Test + public void testSelectConnectionForSameProducer() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://sample/standalone/ns/tp_"); + admin.topics().createNonPartitionedTopic(topicName); + final CommandCloseProducer commandCloseProducer = new CommandCloseProducer(); + // 10 connection per broker. + final PulsarClient clientWith10ConPerBroker = PulsarClient.builder().connectionsPerBroker(10) + .serviceUrl(lookupUrl.toString()).build(); + ProducerImpl producer = (ProducerImpl) clientWith10ConPerBroker.newProducer().topic(topicName).create(); + commandCloseProducer.setProducerId(producer.producerId); + // An error will be reported when the Producer reconnects using a different connection. + // If no error is reported, the same connection was used when reconnecting. + for (int i = 0; i < 20; i++) { + // Trigger reconnect + ClientCnx cnx = producer.getClientCnx(); + if (cnx != null) { + cnx.handleCloseProducer(commandCloseProducer); + Awaitility.await().untilAsserted(() -> + Assert.assertEquals(producer.getState().toString(), HandlerState.State.Ready.toString(), + "The producer uses a different connection when reconnecting") + ); + } + } + + // cleanup. 
+ producer.close(); + clientWith10ConPerBroker.close(); + admin.topics().delete(topicName); + } + @Test public void testDoubleIpAddress() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); @@ -205,20 +239,23 @@ protected void doResolveAll(SocketAddress socketAddress, Promise promise) throws ClientCnx cnx = pool.getConnection( InetSocketAddress.createUnresolved("proxy", 9999), - InetSocketAddress.createUnresolved("proxy", 9999)).get(); + InetSocketAddress.createUnresolved("proxy", 9999), + pool.genRandomKeyToSelectCon()).get(); Assert.assertEquals(cnx.remoteHostName, "proxy"); Assert.assertNull(cnx.proxyToTargetBrokerAddress); cnx = pool.getConnection( InetSocketAddress.createUnresolved("broker1", 9999), - InetSocketAddress.createUnresolved("proxy", 9999)).get(); + InetSocketAddress.createUnresolved("proxy", 9999), + pool.genRandomKeyToSelectCon()).get(); Assert.assertEquals(cnx.remoteHostName, "proxy"); Assert.assertEquals(cnx.proxyToTargetBrokerAddress, "broker1:9999"); cnx = pool.getConnection( InetSocketAddress.createUnresolved("broker2", 9999), - InetSocketAddress.createUnresolved("broker2", 9999)).get(); + InetSocketAddress.createUnresolved("broker2", 9999), + pool.genRandomKeyToSelectCon()).get(); Assert.assertEquals(cnx.remoteHostName, "broker2"); Assert.assertNull(cnx.proxyToTargetBrokerAddress); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java index 8e508b6cf2068..77405e142013a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeyStoreTlsProducerConsumerTestWithAuthTest.java @@ -32,6 +32,7 @@ import io.jsonwebtoken.SignatureAlgorithm; import lombok.Cleanup; +import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; 
import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; @@ -49,6 +50,7 @@ import org.apache.pulsar.client.impl.auth.AuthenticationToken; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -83,6 +85,7 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @SneakyThrows protected void internalSetUpForBroker() { conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); @@ -114,6 +117,25 @@ protected void internalSetUpForBroker() { conf.setAuthenticationProviders(providers); conf.setNumExecutorThreadPoolSize(5); + Set tlsProtocols = Sets.newConcurrentHashSet(); + tlsProtocols.add("TLSv1.3"); + tlsProtocols.add("TLSv1.2"); + conf.setBrokerClientAuthenticationPlugin(AuthenticationKeyStoreTls.class.getName()); + Map authParams = new HashMap<>(); + authParams.put(AuthenticationKeyStoreTls.KEYSTORE_TYPE, KEYSTORE_TYPE); + authParams.put(AuthenticationKeyStoreTls.KEYSTORE_PATH, CLIENT_KEYSTORE_FILE_PATH); + authParams.put(AuthenticationKeyStoreTls.KEYSTORE_PW, CLIENT_KEYSTORE_PW); + conf.setBrokerClientAuthenticationParameters(ObjectMapperFactory.getMapper() + .getObjectMapper().writeValueAsString(authParams)); + conf.setBrokerClientTlsEnabled(true); + conf.setBrokerClientTlsEnabledWithKeyStore(true); + conf.setBrokerClientTlsTrustStore(BROKER_TRUSTSTORE_FILE_PATH); + conf.setBrokerClientTlsTrustStorePassword(BROKER_TRUSTSTORE_PW); + conf.setBrokerClientTlsKeyStore(CLIENT_KEYSTORE_FILE_PATH); + conf.setBrokerClientTlsKeyStoreType(KEYSTORE_TYPE); + conf.setBrokerClientTlsKeyStorePassword(CLIENT_KEYSTORE_PW); + conf.setBrokerClientTlsProtocols(tlsProtocols); + } protected void 
internalSetUpForClient(boolean addCertificates, String lookupUrl) throws Exception { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingDeduplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingDeduplicationTest.java new file mode 100644 index 0000000000000..5e590414132a5 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingDeduplicationTest.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import static org.apache.pulsar.client.impl.MessageChunkingSharedTest.sendChunk; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.Schema; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-impl") +public class MessageChunkingDeduplicationTest extends ProducerConsumerBase { + + @BeforeClass + @Override + protected void setup() throws Exception { + this.conf.setBrokerDeduplicationEnabled(true); + super.internalSetup(); + super.producerBaseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testSendChunkMessageWithSameSequenceID() throws Exception { + String topicName = "persistent://my-property/my-ns/testSendChunkMessageWithSameSequenceID"; + String producerName = "test-producer"; + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.STRING) + .subscriptionName("test-sub") + .topic(topicName) + .subscribe(); + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.STRING) + .producerName(producerName) + .topic(topicName) + .enableChunking(true) + .enableBatching(false) + .create(); + int messageSize = 6000; // payload size in KB + String message = "a".repeat(messageSize * 1000); + producer.newMessage().value(message).sequenceId(10).send(); + Message msg = 
consumer.receive(10, TimeUnit.SECONDS); + assertNotNull(msg); + assertTrue(msg.getMessageId() instanceof ChunkMessageIdImpl); + assertEquals(msg.getValue(), message); + producer.newMessage().value(message).sequenceId(10).send(); + msg = consumer.receive(3, TimeUnit.SECONDS); + assertNull(msg); + } + + @Test + public void testDeduplicateChunksInSingleChunkMessages() throws Exception { + String topicName = "persistent://my-property/my-ns/testDeduplicateChunksInSingleChunkMessage"; + String producerName = "test-producer"; + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.STRING) + .subscriptionName("test-sub") + .topic(topicName) + .subscribe(); + final PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopicIfExists(topicName).get().orElse(null); + assertNotNull(persistentTopic); + sendChunk(persistentTopic, producerName, 1, 0, 2); + sendChunk(persistentTopic, producerName, 1, 1, 2); + sendChunk(persistentTopic, producerName, 1, 1, 2); + + Message message = consumer.receive(15, TimeUnit.SECONDS); + assertEquals(message.getData().length, 2); + + sendChunk(persistentTopic, producerName, 2, 0, 3); + sendChunk(persistentTopic, producerName, 2, 1, 3); + sendChunk(persistentTopic, producerName, 2, 1, 3); + sendChunk(persistentTopic, producerName, 2, 2, 3); + message = consumer.receive(20, TimeUnit.SECONDS); + assertEquals(message.getData().length, 3); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java index 163c42d835b35..3d24d3746d66a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingSharedTest.java @@ -23,6 +23,7 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import io.netty.buffer.ByteBuf; +import 
io.netty.buffer.Unpooled; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -34,6 +35,7 @@ import java.util.concurrent.TimeUnit; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.ConsumerBuilder; @@ -45,7 +47,6 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; -import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.Commands; import org.awaitility.Awaitility; @@ -217,7 +218,7 @@ private static void sendNonChunk(final PersistentTopic persistentTopic, sendChunk(persistentTopic, producerName, sequenceId, null, null); } - private static void sendChunk(final PersistentTopic persistentTopic, + protected static void sendChunk(final PersistentTopic persistentTopic, final String producerName, final long sequenceId, final Integer chunkId, @@ -233,16 +234,33 @@ private static void sendChunk(final PersistentTopic persistentTopic, metadata.setTotalChunkMsgSize(numChunks); } final ByteBuf buf = Commands.serializeMetadataAndPayload(Commands.ChecksumType.Crc32c, metadata, - PulsarByteBufAllocator.DEFAULT.buffer(1)); - persistentTopic.publishMessage(buf, (e, ledgerId, entryId) -> { - String name = producerName + "-" + sequenceId; - if (chunkId != null) { - name += "-" + chunkId + "-" + numChunks; + Unpooled.wrappedBuffer("a".getBytes())); + persistentTopic.publishMessage(buf, new Topic.PublishContext() { + @Override + public boolean isChunked() { + return chunkId != null; } - if (e == null) { - log.info("Sent {} to ({}, {})", name, ledgerId, entryId); - } else { - log.error("Failed to send {}: {}", name, e.getMessage()); + + 
@Override + public String getProducerName() { + return producerName; + } + + public long getSequenceId() { + return sequenceId; + } + + @Override + public void completed(Exception e, long ledgerId, long entryId) { + String name = producerName + "-" + sequenceId; + if (chunkId != null) { + name += "-" + chunkId + "-" + numChunks; + } + if (e == null) { + log.info("Sent {} to ({}, {})", name, ledgerId, entryId); + } else { + log.error("Failed to send {}: {}", name, e.getMessage()); + } } }); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java index 2797d66f7fe21..f266afd8a2ee1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java @@ -356,6 +356,80 @@ public void testMaxPendingChunkMessages() throws Exception { assertNull(consumer.receive(5, TimeUnit.SECONDS)); } + @Test + public void testResendChunkMessagesWithoutAckHole() throws Exception { + log.info("-- Starting {} test --", methodName); + final String topicName = "persistent://my-property/my-ns/testResendChunkMessagesWithoutAckHole"; + final String subName = "my-subscriber-name"; + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subName) + .maxPendingChunkedMessage(10) + .autoAckOldestChunkedMessageOnQueueFull(true) + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .chunkMaxMessageSize(100) + .enableChunking(true) + .enableBatching(false) + .create(); + + sendSingleChunk(producer, "0", 0, 2); + + sendSingleChunk(producer, "0", 0, 2); // Resending the first chunk + sendSingleChunk(producer, "0", 1, 2); + + Message receivedMsg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals(receivedMsg.getValue(), "chunk-0-0|chunk-0-1|"); + 
consumer.acknowledge(receivedMsg); + assertEquals(admin.topics().getStats(topicName).getSubscriptions().get(subName) + .getNonContiguousDeletedMessagesRanges(), 0); + } + + @Test + public void testResendChunkMessages() throws Exception { + log.info("-- Starting {} test --", methodName); + final String topicName = "persistent://my-property/my-ns/testResendChunkMessages"; + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName("my-subscriber-name") + .maxPendingChunkedMessage(10) + .autoAckOldestChunkedMessageOnQueueFull(true) + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .chunkMaxMessageSize(100) + .enableChunking(true) + .enableBatching(false) + .create(); + + sendSingleChunk(producer, "0", 0, 2); + + sendSingleChunk(producer, "0", 0, 2); // Resending the first chunk + sendSingleChunk(producer, "1", 0, 3); // This is for testing the interwoven chunked message + sendSingleChunk(producer, "1", 1, 3); + sendSingleChunk(producer, "1", 0, 3); // Resending the UUID-1 chunked message + + sendSingleChunk(producer, "0", 1, 2); + + Message receivedMsg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals(receivedMsg.getValue(), "chunk-0-0|chunk-0-1|"); + consumer.acknowledge(receivedMsg); + + sendSingleChunk(producer, "1", 1, 3); + sendSingleChunk(producer, "1", 2, 3); + + receivedMsg = consumer.receive(5, TimeUnit.SECONDS); + assertEquals(receivedMsg.getValue(), "chunk-1-0|chunk-1-1|chunk-1-2|"); + consumer.acknowledge(receivedMsg); + Assert.assertEquals(((ConsumerImpl) consumer).getAvailablePermits(), 8); + } + /** * Validate that chunking is not supported with batching and non-persistent topic * diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java index a41aac9bd457f..d9bbc6a9d742a 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java @@ -211,7 +211,7 @@ public void testReadMessageWithBatchingWithMessageInclusive() throws Exception { reader.close(); } - @Test(timeOut = 10000) + @Test public void testReaderWithTimeLong() throws Exception { String ns = "my-property/my-ns"; String topic = "persistent://" + ns + "/testReadFromPartition" + UUID.randomUUID(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java index 876fa98bce414..a6b77a1c72775 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java @@ -321,7 +321,7 @@ public void testNegativeAcksDeleteFromUnackedTracker() throws Exception { negativeAcksTracker.close(); } - @Test(timeOut = 10000) + @Test public void testNegativeAcksWithBatchAckEnabled() throws Exception { cleanup(); conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java index 76936334eb0ba..b9139dabdf021 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplAuthTest.java @@ -85,6 +85,7 @@ public void setup() throws Exception { // set isTcpLookup = true, to use BinaryProtoLookupService to get topics for a pattern. 
isTcpLookup = true; + conf.setTopicLevelPoliciesEnabled(false); conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java index c8f7b721cce6a..7707abafde8de 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PatternTopicsConsumerImplTest.java @@ -27,6 +27,7 @@ import com.google.common.collect.Lists; +import java.time.Duration; import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; @@ -35,23 +36,30 @@ import java.util.stream.IntStream; import io.netty.util.Timeout; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.InjectedClientCnxClientBuilder; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.RegexSubscriptionMode; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.common.api.proto.BaseCommand; +import org.apache.pulsar.common.api.proto.CommandWatchTopicListSuccess; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; +import org.awaitility.reflect.WhiteboxImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import 
org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @Test(groups = "broker-impl") @@ -67,6 +75,7 @@ public void setup() throws Exception { isTcpLookup = true; // enabled transaction, to test pattern consumers not subscribe to transaction system topic. conf.setTransactionCoordinatorEnabled(true); + conf.setSubscriptionPatternMaxLength(10000); super.internalSetup(); super.producerBaseSetup(); } @@ -210,6 +219,12 @@ public void testBinaryProtoToGetTopicsOfNamespacePersistent() throws Exception { .subscribe(); assertTrue(consumer.getTopic().startsWith(PatternMultiTopicsConsumerImpl.DUMMY_TOPIC_NAME_PREFIX)); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -287,6 +302,12 @@ public void testBinaryProtoSubscribeAllTopicOfNamespace() throws Exception { .subscribe(); assertTrue(consumer.getTopic().startsWith(PatternMultiTopicsConsumerImpl.DUMMY_TOPIC_NAME_PREFIX)); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. 
assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -364,6 +385,12 @@ public void testBinaryProtoToGetTopicsOfNamespaceNonPersistent() throws Exceptio .subscriptionTopicsMode(RegexSubscriptionMode.NonPersistentOnly) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -455,6 +482,12 @@ public void testBinaryProtoToGetTopicsOfNamespaceAll() throws Exception { .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + // 4. verify consumer get methods, to get right number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); List topics = ((PatternMultiTopicsConsumerImpl) consumer).getPartitions(); @@ -525,6 +558,11 @@ public void testStartEmptyPatternConsumer() throws Exception { .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) .receiverQueueSize(4) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); // 3. 
verify consumer get methods, to get 5 number of partitions and topics. assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); @@ -589,13 +627,28 @@ public void testStartEmptyPatternConsumer() throws Exception { producer3.close(); } - @Test(timeOut = testTimeout) - public void testAutoSubscribePatterConsumerFromBrokerWatcher() throws Exception { - String key = "AutoSubscribePatternConsumer"; - String subscriptionName = "my-ex-subscription-" + key; + @DataProvider(name= "delayTypesOfWatchingTopics") + public Object[][] delayTypesOfWatchingTopics(){ + return new Object[][]{ + {true}, + {false} + }; + } - Pattern pattern = Pattern.compile("persistent://my-property/my-ns/pattern-topic.*"); - Consumer consumer = pulsarClient.newConsumer() + @Test(timeOut = testTimeout, dataProvider = "delayTypesOfWatchingTopics") + public void testAutoSubscribePatterConsumerFromBrokerWatcher(boolean delayWatchingTopics) throws Exception { + final String key = "AutoSubscribePatternConsumer"; + final String subscriptionName = "my-ex-subscription-" + key; + final Pattern pattern = Pattern.compile("persistent://my-property/my-ns/pattern-topic.*"); + + PulsarClient client = null; + if (delayWatchingTopics) { + client = createDelayWatchTopicsClient(); + } else { + client = pulsarClient; + } + + Consumer consumer = client.newConsumer() .topicsPattern(pattern) // Disable automatic discovery. .patternAutoDiscoveryPeriod(1000) @@ -620,7 +673,162 @@ public void testAutoSubscribePatterConsumerFromBrokerWatcher() throws Exception assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 1); }); + // cleanup. 
consumer.close(); + admin.topics().deletePartitionedTopic(topicName); + if (delayWatchingTopics) { + client.close(); + } + } + + @DataProvider(name= "regexpConsumerArgs") + public Object[][] regexpConsumerArgs(){ + return new Object[][]{ + {true, true}, + {true, false}, + {false, true}, + {false, false} + }; + } + + private void waitForTopicListWatcherStarted(Consumer consumer) { + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + log.info("isDone: {}, isCompletedExceptionally: {}", completableFuture.isDone(), + completableFuture.isCompletedExceptionally()); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + } + + @Test(timeOut = testTimeout, dataProvider = "regexpConsumerArgs") + public void testPreciseRegexpSubscribe(boolean partitioned, boolean createTopicAfterWatcherStarted) throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final String subscriptionName = "s1"; + final Pattern pattern = Pattern.compile(String.format("%s$", topicName)); + + Consumer consumer = pulsarClient.newConsumer() + .topicsPattern(pattern) + // Disable automatic discovery. + .patternAutoDiscoveryPeriod(1000) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) + .receiverQueueSize(4) + .subscribe(); + if (createTopicAfterWatcherStarted) { + waitForTopicListWatcherStarted(consumer); + } + + // 1. create topic. + if (partitioned) { + admin.topics().createPartitionedTopic(topicName, 1); + } else { + admin.topics().createNonPartitionedTopic(topicName); + } + + // 2. verify consumer can subscribe the topic. 
+ assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); + Awaitility.await().untilAsserted(() -> { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitions().size(), 1); + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getConsumers().size(), 1); + if (partitioned) { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 1); + } else { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 0); + } + }); + + // cleanup. + consumer.close(); + if (partitioned) { + admin.topics().deletePartitionedTopic(topicName); + } else { + admin.topics().delete(topicName); + } + } + + @DataProvider(name= "partitioned") + public Object[][] partitioned(){ + return new Object[][]{ + {true}, + {true} + }; + } + + @Test(timeOut = 240 * 1000, dataProvider = "partitioned") + public void testPreciseRegexpSubscribeDisabledTopicWatcher(boolean partitioned) throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://public/default/tp"); + final String subscriptionName = "s1"; + final Pattern pattern = Pattern.compile(String.format("%s$", topicName)); + + // Close all ServerCnx by close client-side sockets to make the config changes effect. + pulsar.getConfig().setEnableBrokerSideSubscriptionPatternEvaluation(false); + reconnectAllConnections(); + + Consumer consumer = pulsarClient.newConsumer() + .topicsPattern(pattern) + // Disable brokerSideSubscriptionPatternEvaluation will leading disable topic list watcher. + // So set patternAutoDiscoveryPeriod to a little value. + .patternAutoDiscoveryPeriod(1) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS) + .receiverQueueSize(4) + .subscribe(); + + // 1. create topic. 
+ if (partitioned) { + admin.topics().createPartitionedTopic(topicName, 1); + } else { + admin.topics().createNonPartitionedTopic(topicName); + } + + // 2. verify consumer can subscribe the topic. + // Since the minimum value of `patternAutoDiscoveryPeriod` is 60s, we set the test timeout to a triple value. + assertSame(pattern, ((PatternMultiTopicsConsumerImpl) consumer).getPattern()); + Awaitility.await().atMost(Duration.ofMinutes(3)).untilAsserted(() -> { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitions().size(), 1); + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getConsumers().size(), 1); + if (partitioned) { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 1); + } else { + assertEquals(((PatternMultiTopicsConsumerImpl) consumer).getPartitionedTopics().size(), 0); + } + }); + + // cleanup. + consumer.close(); + if (partitioned) { + admin.topics().deletePartitionedTopic(topicName); + } else { + admin.topics().delete(topicName); + } + // Close all ServerCnx by close client-side sockets to make the config changes effect. + pulsar.getConfig().setEnableBrokerSideSubscriptionPatternEvaluation(true); + reconnectAllConnections(); + } + + private PulsarClient createDelayWatchTopicsClient() throws Exception { + ClientBuilderImpl clientBuilder = (ClientBuilderImpl) PulsarClient.builder().serviceUrl(lookupUrl.toString()); + return InjectedClientCnxClientBuilder.create(clientBuilder, + (conf, eventLoopGroup) -> new ClientCnx(conf, eventLoopGroup) { + public CompletableFuture newWatchTopicList( + BaseCommand command, long requestId) { + // Inject 2 seconds delay when sending command New Watch Topics. 
+ CompletableFuture res = new CompletableFuture<>(); + new Thread(() -> { + sleepSeconds(2); + super.newWatchTopicList(command, requestId).whenComplete((v, ex) -> { + if (ex != null) { + res.completeExceptionally(ex); + } else { + res.complete(v); + } + }); + }).start(); + return res; + } + }); } // simulate subscribe a pattern which has 3 topics, but then matched topic added in. @@ -665,6 +873,12 @@ public void testAutoSubscribePatternConsumer() throws Exception { .receiverQueueSize(4) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + assertTrue(consumer instanceof PatternMultiTopicsConsumerImpl); // 4. verify consumer get methods, to get 6 number of partitions and topics: 6=1+2+3 @@ -775,6 +989,12 @@ public void testAutoUnsubscribePatternConsumer() throws Exception { .receiverQueueSize(4) .subscribe(); + // Wait topic list watcher creation. + Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + assertTrue(consumer instanceof PatternMultiTopicsConsumerImpl); // 4. verify consumer get methods, to get 0 number of partitions and topics: 6=1+2+3 @@ -861,6 +1081,12 @@ public void testTopicDeletion() throws Exception { .subscriptionName("sub") .subscribe(); + // Wait topic list watcher creation. 
+ Awaitility.await().untilAsserted(() -> { + CompletableFuture completableFuture = WhiteboxImpl.getInternalState(consumer, "watcherFuture"); + assertTrue(completableFuture.isDone() && !completableFuture.isCompletedExceptionally()); + }); + assertTrue(consumer instanceof PatternMultiTopicsConsumerImpl); PatternMultiTopicsConsumerImpl consumerImpl = (PatternMultiTopicsConsumerImpl) consumer; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerConsumerInternalTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerConsumerInternalTest.java index 5f242d4495480..240d8d2304768 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerConsumerInternalTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerConsumerInternalTest.java @@ -19,21 +19,33 @@ package org.apache.pulsar.client.impl; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; + +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.ServerCnx; +import org.apache.pulsar.client.api.BatcherBuilder; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerConsumerBase; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.common.api.proto.CommandCloseProducer; import org.awaitility.Awaitility; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; /** * Different with {@link org.apache.pulsar.client.api.SimpleProducerConsumerTest}, this class can visit the variables - * of {@link ConsumerImpl} which are modified `protected`. 
+ * of {@link ConsumerImpl} or {@link ProducerImpl} which have protected or default access modifiers. */ -@Test(groups = "broker-api") +@Slf4j +@Test(groups = "broker-impl") public class ProducerConsumerInternalTest extends ProducerConsumerBase { @BeforeClass(alwaysRun = true) @@ -49,6 +61,61 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @Test + public void testSameProducerRegisterTwice() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + admin.topics().createNonPartitionedTopic(topicName); + + // Create producer using default producerName. + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer().topic(topicName).create(); + ServiceProducer serviceProducer = getServiceProducer(producer, topicName); + + // Remove producer maintained by server cnx. To make it can register the second time. + removeServiceProducerMaintainedByServerCnx(serviceProducer); + + // Trigger the client producer reconnect. + CommandCloseProducer commandCloseProducer = new CommandCloseProducer(); + commandCloseProducer.setProducerId(producer.producerId); + producer.getClientCnx().handleCloseProducer(commandCloseProducer); + + // Verify the reconnection will be success. + Awaitility.await().untilAsserted(() -> { + assertEquals(producer.getState().toString(), "Ready"); + }); + } + + @Test + public void testSameProducerRegisterTwiceWithSpecifiedProducerName() throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + final String pName = "p1"; + admin.topics().createNonPartitionedTopic(topicName); + + // Create producer using default producerName. + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer().producerName(pName).topic(topicName).create(); + ServiceProducer serviceProducer = getServiceProducer(producer, topicName); + + // Remove producer maintained by server cnx. To make it can register the second time. 
+ removeServiceProducerMaintainedByServerCnx(serviceProducer); + + // Trigger the client producer reconnect. + CommandCloseProducer commandCloseProducer = new CommandCloseProducer(); + commandCloseProducer.setProducerId(producer.producerId); + producer.getClientCnx().handleCloseProducer(commandCloseProducer); + + // Verify the reconnection will be success. + Awaitility.await().untilAsserted(() -> { + assertEquals(producer.getState().toString(), "Ready", "The producer registration failed"); + }); + } + + private void removeServiceProducerMaintainedByServerCnx(ServiceProducer serviceProducer) { + ServerCnx serverCnx = (ServerCnx) serviceProducer.getServiceProducer().getCnx(); + serverCnx.removedProducer(serviceProducer.getServiceProducer()); + Awaitility.await().untilAsserted(() -> { + assertFalse(serverCnx.getProducers().containsKey(serviceProducer.getServiceProducer().getProducerId())); + }); + } + @Test public void testExclusiveConsumerWillAlwaysRetryEvenIfReceivedConsumerBusyError() throws Exception { final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); @@ -87,4 +154,36 @@ public void testExclusiveConsumerWillAlwaysRetryEvenIfReceivedConsumerBusyError( consumer.close(); admin.topics().delete(topicName, false); } + + @DataProvider(name = "containerBuilder") + public Object[][] containerBuilderProvider() { + return new Object[][] { + { BatcherBuilder.DEFAULT }, + { BatcherBuilder.KEY_BASED } + }; + } + + @Test(timeOut = 30000, dataProvider = "containerBuilder") + public void testSendTimerCheckForBatchContainer(BatcherBuilder batcherBuilder) throws Exception { + final String topicName = BrokerTestUtil.newUniqueName("persistent://my-property/my-ns/tp_"); + @Cleanup Producer producer = pulsarClient.newProducer().topic(topicName) + .batcherBuilder(batcherBuilder) + .sendTimeout(1, TimeUnit.SECONDS) + .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS) + .batchingMaxMessages(1000) + .create(); + + log.info("Before sendAsync 
msg-0: {}", System.nanoTime()); + CompletableFuture future = producer.sendAsync("msg-0".getBytes()); + future.thenAccept(msgId -> log.info("msg-0 done: {} (msgId: {})", System.nanoTime(), msgId)); + future.get(); // t: the current time point + + ((ProducerImpl) producer).triggerSendTimer(); // t+1000ms && t+2000ms: run() will be called again + + Thread.sleep(1950); // t+2050ms: the batch timer is expired, which happens after run() is called + log.info("Before sendAsync msg-1: {}", System.nanoTime()); + future = producer.sendAsync("msg-1".getBytes()); + future.thenAccept(msgId -> log.info("msg-1 done: {} (msgId: {})", System.nanoTime(), msgId)); + future.get(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java index 3ec784e248cba..d776fdb0ed915 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java @@ -23,7 +23,12 @@ import static org.mockito.Mockito.mock; import io.netty.buffer.ByteBufAllocator; import java.lang.reflect.Field; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; +import org.apache.pulsar.client.api.CompressionType; +import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.ProducerConsumerBase; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; @@ -35,9 +40,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.nio.charset.StandardCharsets; -import java.util.concurrent.TimeUnit; - @Test(groups = "broker-impl") public class ProducerMemoryLimitTest extends ProducerConsumerBase { @@ -191,6 +193,40 @@ public void testProducerCloseMemoryRelease() 
throws Exception { Assert.assertEquals(memoryLimitController.currentUsage(), 0); } + @Test(timeOut = 10_000) + public void testProducerBlockReserveMemory() throws Exception { + replacePulsarClient(PulsarClient.builder(). + serviceUrl(lookupUrl.toString()) + .memoryLimit(1, SizeUnit.KILO_BYTES)); + @Cleanup + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer() + .topic("testProducerMemoryLimit") + .sendTimeout(5, TimeUnit.SECONDS) + .compressionType(CompressionType.SNAPPY) + .messageRoutingMode(MessageRoutingMode.RoundRobinPartition) + .maxPendingMessages(0) + .blockIfQueueFull(true) + .enableBatching(true) + .batchingMaxMessages(100) + .batchingMaxBytes(65536) + .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS) + .create(); + int msgCount = 5; + CountDownLatch cdl = new CountDownLatch(msgCount); + for (int i = 0; i < msgCount; i++) { + producer.sendAsync("memory-test".getBytes(StandardCharsets.UTF_8)).whenComplete(((messageId, throwable) -> { + cdl.countDown(); + })); + } + + cdl.await(); + + producer.close(); + PulsarClientImpl clientImpl = (PulsarClientImpl) this.pulsarClient; + final MemoryLimitController memoryLimitController = clientImpl.getMemoryLimitController(); + Assert.assertEquals(memoryLimitController.currentUsage(), 0); + } + private void initClientWithMemoryLimit() throws PulsarClientException { replacePulsarClient(PulsarClient.builder(). 
serviceUrl(lookupUrl.toString()) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java index a725562ac40aa..6555c152bacad 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java @@ -122,7 +122,7 @@ public CompletableFuture getConnection(String topic) { result.completeExceptionally(new IOException("New connections are rejected.")); return result; } else { - return super.getConnection(topic); + return super.getConnection(topic, getCnxPool().genRandomKeyToSelectCon()); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java index 9b8b1e5efb99c..d79a31c07f218 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawBatchMessageContainerImplTest.java @@ -47,7 +47,6 @@ import org.apache.pulsar.compaction.CompactionTest; import org.testng.Assert; import org.testng.annotations.BeforeMethod; -import org.testng.annotations.DataProvider; import org.testng.annotations.Test; public class RawBatchMessageContainerImplTest { @@ -56,8 +55,6 @@ public class RawBatchMessageContainerImplTest { CryptoKeyReader cryptoKeyReader; Map encryptKeys; - int maxBytesInBatch = 5 * 1024 * 1024; - public void setEncryptionAndCompression(boolean encrypt, boolean compress) { if (compress) { compressionType = ZSTD; @@ -107,22 +104,22 @@ public MessageImpl createMessage(String topic, String value, int entryId) { public void setup() throws Exception { setEncryptionAndCompression(false, true); } - @DataProvider(name = "testBatchLimitByMessageCount") - public static Object[][] 
testBatchLimitByMessageCount() { - return new Object[][] {{true}, {false}}; - } - - @Test(timeOut = 20000, dataProvider = "testBatchLimitByMessageCount") - public void testToByteBufWithBatchLimit(boolean testBatchLimitByMessageCount) throws IOException { - RawBatchMessageContainerImpl container = testBatchLimitByMessageCount ? - new RawBatchMessageContainerImpl(2, Integer.MAX_VALUE) : - new RawBatchMessageContainerImpl(Integer.MAX_VALUE, 5); + @Test(timeOut = 20000) + public void testToByteBufWithBatchLimit()throws IOException { + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); String topic = "my-topic"; - var full1 = container.add(createMessage(topic, "hi-1", 0), null); - var full2 = container.add(createMessage(topic, "hi-2", 1), null); + MessageImpl message1 = createMessage(topic, "hi-1", 0); + boolean hasEnoughSpase1 = container.haveEnoughSpace(message1); + var full1 = container.add(message1, null); assertFalse(full1); - assertTrue(full2); + assertTrue(hasEnoughSpase1); + MessageImpl message2 = createMessage(topic, "hi-2", 1); + boolean hasEnoughSpase2 = container.haveEnoughSpace(message2); + assertFalse(hasEnoughSpase2); + var full2 = container.add(message2, null); + assertFalse(full2); + ByteBuf buf = container.toByteBuf(); @@ -167,7 +164,7 @@ public void testToByteBufWithBatchLimit(boolean testBatchLimitByMessageCount) th public void testToByteBufWithCompressionAndEncryption() throws IOException { setEncryptionAndCompression(true, true); - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(2, maxBytesInBatch); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); container.setCryptoKeyReader(cryptoKeyReader); String topic = "my-topic"; container.add(createMessage(topic, "hi-1", 0), null); @@ -217,7 +214,7 @@ public void testToByteBufWithCompressionAndEncryption() throws IOException { @Test public void testToByteBufWithSingleMessage() throws IOException { - 
RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(2, maxBytesInBatch); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); String topic = "my-topic"; container.add(createMessage(topic, "hi-1", 0), null); ByteBuf buf = container.toByteBuf(); @@ -250,25 +247,31 @@ public void testToByteBufWithSingleMessage() throws IOException { } @Test - public void testMaxNumMessagesInBatch() { - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1, maxBytesInBatch); + public void testAddDifferentBatchMessage() { + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); String topic = "my-topic"; boolean isFull = container.add(createMessage(topic, "hi", 0), null); - Assert.assertTrue(isFull); - Assert.assertTrue(container.isBatchFull()); + Assert.assertFalse(isFull); + Assert.assertFalse(container.isBatchFull()); + MessageImpl message = createMessage(topic, "hi-1", 0); + Assert.assertTrue(container.haveEnoughSpace(message)); + isFull = container.add(message, null); + Assert.assertFalse(isFull); + message = createMessage(topic, "hi-2", 1); + Assert.assertFalse(container.haveEnoughSpace(message)); } @Test(expectedExceptions = UnsupportedOperationException.class) public void testCreateOpSendMsg() { - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1, maxBytesInBatch); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); container.createOpSendMsg(); } @Test public void testToByteBufWithEncryptionWithoutCryptoKeyReader() { setEncryptionAndCompression(true, false); - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1, maxBytesInBatch); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); String topic = "my-topic"; container.add(createMessage(topic, "hi-1", 0), null); Assert.assertEquals(container.getNumMessagesInBatch(), 1); @@ -286,7 +289,7 @@ public void 
testToByteBufWithEncryptionWithoutCryptoKeyReader() { @Test public void testToByteBufWithEncryptionWithInvalidEncryptKeys() { setEncryptionAndCompression(true, false); - RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(1, maxBytesInBatch); + RawBatchMessageContainerImpl container = new RawBatchMessageContainerImpl(); container.setCryptoKeyReader(cryptoKeyReader); encryptKeys = new HashMap<>(); encryptKeys.put(null, null); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java index a201ef104e7b3..95d8926c9ff80 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java @@ -30,8 +30,10 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.commons.lang3.tuple.ImmutableTriple; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.MessageId; @@ -51,6 +53,7 @@ import org.testng.annotations.Test; @Test(groups = "broker-impl") +@Slf4j public class RawReaderTest extends MockedPulsarServiceBaseTest { private static final String subscription = "foobar-sub"; @@ -62,6 +65,7 @@ public void setup() throws Exception { "org.apache.pulsar.common.intercept.AppendBrokerTimestampMetadataInterceptor", "org.apache.pulsar.common.intercept.AppendIndexMetadataInterceptor" )); + conf.setSystemTopicEnabled(false); conf.setExposingBrokerEntryMetadataToClientEnabled(true); super.internalSetup(); @@ -116,7 +120,7 @@ public static String extractKey(RawMessage m) { @Test public void 
testHasMessageAvailableWithoutBatch() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys); RawReader reader = RawReader.create(pulsarClient, topic, subscription).get(); while (true) { @@ -133,20 +137,18 @@ public void testHasMessageAvailableWithoutBatch() throws Exception { } } Assert.assertTrue(keys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testHasMessageAvailableWithBatch() throws Exception { int numKeys = 20; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys, true); RawReader reader = RawReader.create(pulsarClient, topic, subscription).get(); int messageCount = 0; while (true) { boolean hasMsg = reader.hasMessageAvailableAsync().get(); - if (hasMsg && (messageCount == numKeys)) { - Assert.fail("HasMessageAvailable shows still has message when there is no message"); - } if (hasMsg) { try (RawMessage m = reader.readNextAsync().get()) { MessageMetadata meta = Commands.parseMessageMetadata(m.getHeadersAndPayload()); @@ -163,13 +165,14 @@ public void testHasMessageAvailableWithBatch() throws Exception { } Assert.assertEquals(messageCount, numKeys); Assert.assertTrue(keys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testRawReader() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys); @@ -185,12 +188,13 @@ public void testRawReader() throws Exception { } } Assert.assertTrue(keys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testSeekToStart() throws 
Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numKeys); @@ -219,12 +223,13 @@ public void testSeekToStart() throws Exception { } } Assert.assertTrue(readKeys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testSeekToMiddle() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numKeys); @@ -262,6 +267,7 @@ public void testSeekToMiddle() throws Exception { } } Assert.assertTrue(readKeys.isEmpty()); + reader.closeAsync().get(3, TimeUnit.SECONDS); } /** @@ -270,7 +276,7 @@ public void testSeekToMiddle() throws Exception { @Test public void testFlowControl() throws Exception { int numMessages = RawReaderImpl.DEFAULT_RECEIVER_QUEUE_SIZE * 5; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numMessages); @@ -296,12 +302,13 @@ public void testFlowControl() throws Exception { } Assert.assertEquals(timeouts, 1); Assert.assertEquals(keys.size(), numMessages); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testFlowControlBatch() throws Exception { int numMessages = RawReaderImpl.DEFAULT_RECEIVER_QUEUE_SIZE * 5; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numMessages, true); @@ -324,11 +331,12 @@ public void testFlowControlBatch() throws Exception { } } Assert.assertEquals(keys.size(), numMessages); + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testBatchingExtractKeysAndIds() throws Exception { - String topic = 
"persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); try (Producer producer = pulsarClient.newProducer().topic(topic) .maxPendingMessages(3) @@ -363,7 +371,7 @@ public void testBatchingExtractKeysAndIds() throws Exception { @Test public void testBatchingRebatch() throws Exception { - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); try (Producer producer = pulsarClient.newProducer().topic(topic) .maxPendingMessages(3) @@ -392,7 +400,7 @@ public void testBatchingRebatch() throws Exception { @Test public void testBatchingRebatchWithBrokerEntryMetadata() throws Exception { - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); try (Producer producer = pulsarClient.newProducer().topic(topic) .maxPendingMessages(3) @@ -428,7 +436,7 @@ public void testBatchingRebatchWithBrokerEntryMetadata() throws Exception { public void testAcknowledgeWithProperties() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); Set keys = publishMessages(topic, numKeys); @@ -459,14 +467,14 @@ public void testAcknowledgeWithProperties() throws Exception { Assert.assertEquals( ledger.openCursor(subscription).getProperties().get("foobar"), Long.valueOf(0xdeadbeefdecaL))); - + reader.closeAsync().get(3, TimeUnit.SECONDS); } @Test public void testReadCancellationOnClose() throws Exception { int numKeys = 10; - String topic = "persistent://my-property/my-ns/my-raw-topic"; + String topic = "persistent://my-property/my-ns/" + BrokerTestUtil.newUniqueName("reader"); publishMessages(topic, numKeys/2); RawReader reader = RawReader.create(pulsarClient, topic, 
subscription).get(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java index a50c92f7ab8f4..85ff87593cffa 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java @@ -733,4 +733,32 @@ public void testReaderListenerAcknowledgement() admin.topics().deletePartitionedTopic(partitionedTopic); } + @Test + public void testReaderReconnectedFromNextEntry() throws Exception { + final String topic = "persistent://my-property/my-ns/testReaderReconnectedFromNextEntry"; + Reader reader = pulsarClient.newReader(Schema.STRING).topic(topic).receiverQueueSize(1) + .startMessageId(MessageId.earliest).create(); + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic).create(); + + // Send 3 and consume 1. + producer.send("1"); + producer.send("2"); + producer.send("3"); + Message msg1 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg1.getValue(), "1"); + + // Trigger reader reconnect. + admin.topics().unload(topic); + + // For non-durable we are going to restart from the next entry. + Message msg2 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg2.getValue(), "2"); + Message msg3 = reader.readNext(2, TimeUnit.SECONDS); + assertEquals(msg3.getValue(), "3"); + + // cleanup. 
+ reader.close(); + producer.close(); + admin.topics().delete(topic, false); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java index 51b32c2b44ecf..c343ab0d6e294 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TopicsConsumerImplTest.java @@ -34,6 +34,7 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.MessageIdAdv; +import org.apache.pulsar.client.api.MessageListener; import org.apache.pulsar.client.api.MessageRouter; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; @@ -57,22 +58,27 @@ import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; - import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; import java.util.Set; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.TreeSet; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -1394,4 +1400,76 @@ public Map getActiveConsumers() { } } + @DataProvider + public static Object[][] seekByFunction() { + return new Object[][] { + { true }, { false } + }; + } + + @Test(timeOut = 
30000, dataProvider = "seekByFunction") + public void testSeekToNewerPosition(boolean seekByFunction) throws Exception { + final var topic1 = TopicName.get(newTopicName()).toString() + .replace("my-property", "public").replace("my-ns", "default"); + final var topic2 = TopicName.get(newTopicName()).toString() + .replace("my-property", "public").replace("my-ns", "default"); + @Cleanup final var producer1 = pulsarClient.newProducer(Schema.STRING).topic(topic1).create(); + @Cleanup final var producer2 = pulsarClient.newProducer(Schema.STRING).topic(topic2).create(); + producer1.send("1-0"); + producer2.send("2-0"); + producer1.send("1-1"); + producer2.send("2-1"); + final var consumer1 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub") + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + final var timestamps = new ArrayList(); + for (int i = 0; i < 4; i++) { + timestamps.add(consumer1.receive().getPublishTime()); + } + timestamps.sort(Comparator.naturalOrder()); + final var timestamp = timestamps.get(2); + consumer1.close(); + + final Function, CompletableFuture> seekAsync = consumer -> { + final var future = seekByFunction ? 
consumer.seekAsync(__ -> timestamp) : consumer.seekAsync(timestamp); + assertEquals(((ConsumerBase) consumer).getIncomingMessageSize(), 0L); + assertEquals(((ConsumerBase) consumer).getTotalIncomingMessages(), 0); + assertTrue(((ConsumerBase) consumer).getUnAckedMessageTracker().isEmpty()); + return future; + }; + + @Cleanup final var consumer2 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub-2") + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + seekAsync.apply(consumer2).get(); + final var values = new TreeSet(); + for (int i = 0; i < 2; i++) { + values.add(consumer2.receive().getValue()); + } + assertEquals(values, new TreeSet<>(Arrays.asList("1-1", "2-1"))); + + final var valuesInListener = new CopyOnWriteArrayList(); + @Cleanup final var consumer3 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub-3") + .messageListener((MessageListener) (__, msg) -> valuesInListener.add(msg.getValue())) + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + seekAsync.apply(consumer3).get(); + if (valuesInListener.isEmpty()) { + Awaitility.await().untilAsserted(() -> assertEquals(valuesInListener.size(), 2)); + assertEquals(valuesInListener.stream().sorted().toList(), Arrays.asList("1-1", "2-1")); + } // else: consumer3 has passed messages to the listener before seek, in this case we cannot assume anything + + @Cleanup final var consumer4 = pulsarClient.newConsumer(Schema.STRING) + .topics(Arrays.asList(topic1, topic2)).subscriptionName("sub-4") + .ackTimeout(1, TimeUnit.SECONDS) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe(); + seekAsync.apply(consumer4).get(); + final var valuesInReceiveAsync = new ArrayList(); + valuesInReceiveAsync.add(consumer4.receiveAsync().get().getValue()); + 
valuesInReceiveAsync.add(consumer4.receiveAsync().get().getValue()); + assertEquals(valuesInReceiveAsync.stream().sorted().toList(), Arrays.asList("1-1", "2-1")); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java index 34cc3bc1ca526..348fb04b7dd23 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java @@ -254,6 +254,65 @@ private void testFilterMsgsInPendingAckStateWhenConsumerDisconnect(boolean enabl Assert.assertEquals(receiveCounter, count / 2); } + @Test + private void testMsgsInPendingAckStateWouldNotGetTheConsumerStuck() throws Exception { + final String topicName = NAMESPACE1 + "/testMsgsInPendingAckStateWouldNotGetTheConsumerStuck"; + final String subscription = "test"; + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.INT32) + .topic(topicName) + .create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.INT32) + .topic(topicName) + .subscriptionName(subscription) + .subscriptionType(SubscriptionType.Shared) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + int numStep1Receive = 2, numStep2Receive = 2, numStep3Receive = 2; + int numTotalMessage = numStep1Receive + numStep2Receive + numStep3Receive; + + for (int i = 0; i < numTotalMessage; i++) { + producer.send(i); + } + + Transaction step1Txn = getTxn(); + Transaction step2Txn = getTxn(); + + // Step 1, try to consume some messages but do not commit the transaction + for (int i = 0; i < numStep1Receive; i++) { + consumer.acknowledgeAsync(consumer.receive().getMessageId(), step1Txn).get(); + } + + // Step 2, try to consume some messages and commit the transaction + for (int i = 0; i < numStep2Receive; i++) { + 
consumer.acknowledgeAsync(consumer.receive().getMessageId(), step2Txn).get(); + } + + // commit step2Txn + step2Txn.commit().get(); + + // close and re-create consumer + consumer.close(); + @Cleanup + Consumer consumer2 = pulsarClient.newConsumer(Schema.INT32) + .topic(topicName) + .receiverQueueSize(numStep3Receive) + .subscriptionName(subscription) + .subscriptionType(SubscriptionType.Shared) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + // Step 3, try to consume the rest messages and should receive all of them + for (int i = 0; i < numStep3Receive; i++) { + // should get the message instead of timeout + Message msg = consumer2.receive(3, TimeUnit.SECONDS); + Assert.assertEquals(msg.getValue(), numStep1Receive + numStep2Receive + i); + } + } + @Test(dataProvider="enableBatch") private void produceCommitTest(boolean enableBatch) throws Exception { @Cleanup diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java index 96adc67a27b2b..33e4e998ad6cf 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java @@ -38,6 +38,7 @@ import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.tuple.Pair; @@ -62,6 +63,7 @@ import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -159,6 
+161,7 @@ public void cleanup() throws Exception { @Test public void testEntryLookup() throws Exception { + @Cleanup BookKeeper bk = pulsar.getBookKeeperClientFactory().create( this.conf, null, null, Optional.empty(), null); @@ -214,6 +217,7 @@ public void testEntryLookup() throws Exception { @Test public void testCleanupOldCompactedTopicLedger() throws Exception { + @Cleanup BookKeeper bk = pulsar.getBookKeeperClientFactory().create( this.conf, null, null, Optional.empty(), null); @@ -840,4 +844,69 @@ public void testReadCompactedLatestMessageWithInclusive() throws Exception { Assert.assertTrue(reader.hasMessageAvailable()); Assert.assertEquals(reader.readNext().getMessageId(), lastMessage.get()); } + + @Test + public void testCompactWithConcurrentGetCompactionHorizonAndCompactedTopicContext() throws Exception { + @Cleanup + BookKeeper bk0 = pulsar.getBookKeeperClientFactory().create( + this.conf, null, null, Optional.empty(), null); + + final BookKeeper bk = Mockito.spy(bk0); + + Mockito.doAnswer(invocation -> { + Thread.sleep(1500); + invocation.callRealMethod(); + return null; + }).when(bk).asyncOpenLedger(Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); + + LedgerHandle oldCompactedLedger = bk.createLedger(1, 1, + Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, + Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD); + oldCompactedLedger.close(); + LedgerHandle newCompactedLedger = bk.createLedger(1, 1, + Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, + Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD); + newCompactedLedger.close(); + + CompactedTopicImpl compactedTopic = new CompactedTopicImpl(bk); + + PositionImpl oldHorizon = new PositionImpl(1, 2); + var future = CompletableFuture.supplyAsync(() -> { + // set the compacted topic ledger + return compactedTopic.newCompactedLedger(oldHorizon, oldCompactedLedger.getId()); + }); + Thread.sleep(500); + + Optional compactionHorizon = compactedTopic.getCompactionHorizon(); + CompletableFuture 
compactedTopicContext = + compactedTopic.getCompactedTopicContextFuture(); + + if (compactedTopicContext != null) { + Assert.assertEquals(compactionHorizon.get(), oldHorizon); + Assert.assertNotNull(compactedTopicContext); + Assert.assertEquals(compactedTopicContext.join().ledger.getId(), oldCompactedLedger.getId()); + } else { + Assert.assertTrue(compactionHorizon.isEmpty()); + } + + future.join(); + + PositionImpl newHorizon = new PositionImpl(1, 3); + var future2 = CompletableFuture.supplyAsync(() -> { + // update the compacted topic ledger + return compactedTopic.newCompactedLedger(newHorizon, newCompactedLedger.getId()); + }); + Thread.sleep(500); + + compactionHorizon = compactedTopic.getCompactionHorizon(); + compactedTopicContext = compactedTopic.getCompactedTopicContextFuture(); + + if (compactedTopicContext.join().ledger.getId() == newCompactedLedger.getId()) { + Assert.assertEquals(compactionHorizon.get(), newHorizon); + } else { + Assert.assertEquals(compactionHorizon.get(), oldHorizon); + } + + future2.join(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicUtilsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicUtilsTest.java index 329abf9f780fa..2545c0362e82a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicUtilsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicUtilsTest.java @@ -25,8 +25,8 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.Entry; -import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.mockito.Mockito; import org.testng.Assert; @@ -46,8 +46,9 @@ public void testReadCompactedEntriesWithEmptyEntries() throws 
ExecutionException PositionImpl initPosition = PositionImpl.get(1, 90); AtomicReference readPositionRef = new AtomicReference<>(initPosition.getNext()); - ManagedCursor cursor = Mockito.mock(ManagedCursor.class); + ManagedCursorImpl cursor = Mockito.mock(ManagedCursorImpl.class); Mockito.doReturn(readPositionRef.get()).when(cursor).getReadPosition(); + Mockito.doReturn(1).when(cursor).applyMaxSizeCap(Mockito.anyInt(), Mockito.anyLong()); Mockito.doAnswer(invocation -> { readPositionRef.set(invocation.getArgument(0)); return null; @@ -68,8 +69,8 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { } }; - CompactedTopicUtils.asyncReadCompactedEntries(service, cursor, 1, 100, false, - readEntriesCallback, false, null); + CompactedTopicUtils.asyncReadCompactedEntries(service, cursor, 1, 100, + PositionImpl.LATEST, false, readEntriesCallback, false, null); List entries = completableFuture.get(); Assert.assertTrue(entries.isEmpty()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java index 055c595fbfec8..88d923f74e196 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionRetentionTest.java @@ -38,6 +38,7 @@ import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; @@ -45,9 +46,13 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.naming.SystemTopicNames; +import org.apache.pulsar.common.naming.TopicName; import 
org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -212,6 +217,51 @@ public void testCompactionRetentionOnTopicCreationWithTopicPolicies() throws Exc ); } + @Test + public void testRetentionPolicesForSystemTopic() throws Exception { + String namespace = "my-tenant/my-ns"; + String topicPrefix = "persistent://" + namespace + "/"; + admin.namespaces().setRetention(namespace, new RetentionPolicies(-1, -1)); + // Check event topics and transaction internal topics. + for (String eventTopic : SystemTopicNames.EVENTS_TOPIC_NAMES) { + checkSystemTopicRetentionPolicy(topicPrefix + eventTopic); + } + checkSystemTopicRetentionPolicy(topicPrefix + SystemTopicNames.TRANSACTION_COORDINATOR_ASSIGN); + checkSystemTopicRetentionPolicy(topicPrefix + SystemTopicNames.TRANSACTION_COORDINATOR_LOG); + checkSystemTopicRetentionPolicy(topicPrefix + SystemTopicNames.PENDING_ACK_STORE_SUFFIX); + + // Check common topics. + checkCommonTopicRetentionPolicy(topicPrefix + "my-topic" + System.nanoTime()); + // Specify retention policies for system topic. 
+ pulsar.getConfiguration().setTopicLevelPoliciesEnabled(true); + pulsar.getConfiguration().setSystemTopicEnabled(true); + admin.topics().createNonPartitionedTopic(topicPrefix + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT); + admin.topicPolicies().setRetention(topicPrefix + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT, + new RetentionPolicies(10, 10)); + Awaitility.await().untilAsserted(() -> { + checkTopicRetentionPolicy(topicPrefix + SystemTopicNames.TRANSACTION_BUFFER_SNAPSHOT, + new RetentionPolicies(10, 10)); + }); + } + + private void checkSystemTopicRetentionPolicy(String topicName) throws Exception { + checkTopicRetentionPolicy(topicName, new RetentionPolicies(0, 0)); + + } + + private void checkCommonTopicRetentionPolicy(String topicName) throws Exception { + checkTopicRetentionPolicy(topicName, new RetentionPolicies(-1, -1)); + } + + private void checkTopicRetentionPolicy(String topicName, RetentionPolicies retentionPolicies) throws Exception { + ManagedLedgerConfig config = pulsar.getBrokerService() + .getManagedLedgerConfig(TopicName.get(topicName)).get(); + Assert.assertEquals(config.getRetentionSizeInMB(), retentionPolicies.getRetentionSizeInMB()); + Assert.assertEquals(config.getRetentionTimeMillis(), retentionPolicies.getRetentionTimeInMinutes() < 0 + ? 
retentionPolicies.getRetentionTimeInMinutes() + : retentionPolicies.getRetentionTimeInMinutes() * 60000L); + } + private void testCompactionCursorRetention(String topic) throws Exception { Set keys = Sets.newHashSet("a", "b", "c"); Set keysToExpire = Sets.newHashSet("x1", "x2"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java index d11a2f87192ff..c663e3fd46ac1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java @@ -25,6 +25,7 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -46,21 +47,29 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import lombok.Cleanup; import lombok.SneakyThrows; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.api.OpenBuilder; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.ManagedLedgerInfo; import org.apache.bookkeeper.mledger.Position; +import org.apache.commons.lang3.mutable.MutableLong; +import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import 
org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.service.persistent.SystemTopic; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.CryptoKeyReader; @@ -70,12 +79,15 @@ import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerBuilder; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Reader; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.BatchMessageIdImpl; -import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.client.impl.ConsumerImpl; +import org.apache.pulsar.common.api.proto.CommandAck; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; @@ -85,6 +97,7 @@ import org.apache.pulsar.common.protocol.Markers; import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -92,6 +105,7 @@ import org.testng.annotations.Test; @Test(groups = "broker-impl") +@Slf4j public class CompactionTest extends MockedPulsarServiceBaseTest { protected ScheduledExecutorService compactionScheduler; protected BookKeeper bk; @@ -534,14 +548,8 @@ public void testBatchMessageIdsDontChange() 
throws Exception { Assert.assertEquals(message2.getKey(), "key2"); Assert.assertEquals(new String(message2.getData()), "my-message-3"); if (getCompactor() instanceof StrategicTwoPhaseCompactor) { - MessageIdImpl id = (MessageIdImpl) messages.get(0).getMessageId(); - MessageIdImpl id1 = new MessageIdImpl( - id.getLedgerId(), id.getEntryId(), id.getPartitionIndex()); - Assert.assertEquals(message1.getMessageId(), id1); - id = (MessageIdImpl) messages.get(2).getMessageId(); - MessageIdImpl id2 = new MessageIdImpl( - id.getLedgerId(), id.getEntryId(), id.getPartitionIndex()); - Assert.assertEquals(message2.getMessageId(), id2); + Assert.assertEquals(message1.getMessageId(), messages.get(0).getMessageId()); + Assert.assertEquals(message2.getMessageId(), messages.get(1).getMessageId()); } else { Assert.assertEquals(message1.getMessageId(), messages.get(0).getMessageId()); Assert.assertEquals(message2.getMessageId(), messages.get(2).getMessageId()); @@ -549,6 +557,60 @@ public void testBatchMessageIdsDontChange() throws Exception { } } + @Test + public void testBatchMessageWithNullValue() throws Exception { + String topic = "persistent://my-property/use/my-ns/my-topic1"; + + pulsarClient.newConsumer().topic(topic).subscriptionName("sub1") + .receiverQueueSize(1).readCompacted(true).subscribe().close(); + + try (Producer producer = pulsarClient.newProducer().topic(topic) + .maxPendingMessages(3) + .enableBatching(true) + .batchingMaxMessages(3) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create() + ) { + // batch 1 + producer.newMessage().key("key1").value("my-message-1".getBytes()).sendAsync(); + producer.newMessage().key("key1").value(null).sendAsync(); + producer.newMessage().key("key2").value("my-message-3".getBytes()).send(); + + // batch 2 + producer.newMessage().key("key3").value("my-message-4".getBytes()).sendAsync(); + producer.newMessage().key("key3").value("my-message-5".getBytes()).sendAsync(); + 
producer.newMessage().key("key3").value("my-message-6".getBytes()).send(); + + // batch 3 + producer.newMessage().key("key4").value("my-message-7".getBytes()).sendAsync(); + producer.newMessage().key("key4").value(null).sendAsync(); + producer.newMessage().key("key5").value("my-message-9".getBytes()).send(); + } + + + // compact the topic + compact(topic); + + // Read messages before compaction to get ids + List> messages = new ArrayList<>(); + try (Consumer consumer = pulsarClient.newConsumer().topic(topic) + .subscriptionName("sub1").receiverQueueSize(1).readCompacted(true).subscribe()) { + while (true) { + Message message = consumer.receive(5, TimeUnit.SECONDS); + if (message == null) { + break; + } + messages.add(message); + } + } + + assertEquals(messages.size(), 3); + assertEquals(messages.get(0).getKey(), "key2"); + assertEquals(messages.get(1).getKey(), "key3"); + assertEquals(messages.get(2).getKey(), "key5"); + } + @Test public void testWholeBatchCompactedOut() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; @@ -585,8 +647,17 @@ public void testWholeBatchCompactedOut() throws Exception { } } - @Test - public void testKeyLessMessagesPassThrough() throws Exception { + @DataProvider(name = "retainNullKey") + public static Object[][] retainNullKey() { + return new Object[][] {{true}, {false}}; + } + + @Test(dataProvider = "retainNullKey") + public void testKeyLessMessagesPassThrough(boolean retainNullKey) throws Exception { + conf.setTopicCompactionRetainNullKey(retainNullKey); + restartBroker(); + FieldUtils.writeDeclaredField(compactor, "topicCompactionRetainNullKey", retainNullKey, true); + String topic = "persistent://my-property/use/my-ns/my-topic1"; // subscribe before sending anything, so that we get all messages @@ -627,29 +698,25 @@ public void testKeyLessMessagesPassThrough() throws Exception { Message m = consumer.receive(2, TimeUnit.SECONDS); assertNull(m); } else { - Message message1 = consumer.receive(); - 
Assert.assertFalse(message1.hasKey()); - Assert.assertEquals(new String(message1.getData()), "my-message-1"); - - Message message2 = consumer.receive(); - Assert.assertFalse(message2.hasKey()); - Assert.assertEquals(new String(message2.getData()), "my-message-2"); - - Message message3 = consumer.receive(); - Assert.assertEquals(message3.getKey(), "key1"); - Assert.assertEquals(new String(message3.getData()), "my-message-4"); - - Message message4 = consumer.receive(); - Assert.assertEquals(message4.getKey(), "key2"); - Assert.assertEquals(new String(message4.getData()), "my-message-6"); - - Message message5 = consumer.receive(); - Assert.assertFalse(message5.hasKey()); - Assert.assertEquals(new String(message5.getData()), "my-message-7"); + List> result = new ArrayList<>(); + while (true) { + Message message = consumer.receive(10, TimeUnit.SECONDS); + if (message == null) { + break; + } + result.add(Pair.of(message.getKey(), message.getData() == null ? null : new String(message.getData()))); + } - Message message6 = consumer.receive(); - Assert.assertFalse(message6.hasKey()); - Assert.assertEquals(new String(message6.getData()), "my-message-8"); + List> expectList; + if (retainNullKey) { + expectList = List.of( + Pair.of(null, "my-message-1"), Pair.of(null, "my-message-2"), + Pair.of("key1", "my-message-4"), Pair.of("key2", "my-message-6"), + Pair.of(null, "my-message-7"), Pair.of(null, "my-message-8")); + } else { + expectList = List.of(Pair.of("key1", "my-message-4"), Pair.of("key2", "my-message-6")); + } + Assert.assertEquals(result, expectList); } } } @@ -1711,9 +1778,9 @@ public void testReadUnCompacted(boolean batchEnabled) throws PulsarClientExcepti @SneakyThrows @Test public void testHealthCheckTopicNotCompacted() { - NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()); + NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getBrokerId(), 
pulsar.getConfiguration()); String topicV1 = "persistent://" + heartbeatNamespaceV1.toString() + "/healthcheck"; - NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfiguration()); + NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getBrokerId(), pulsar.getConfiguration()); String topicV2 = heartbeatNamespaceV2.toString() + "/healthcheck"; Producer producer1 = pulsarClient.newProducer().topic(topicV1).create(); Producer producer2 = pulsarClient.newProducer().topic(topicV2).create(); @@ -1783,4 +1850,495 @@ public void addFailed(ManagedLedgerException exception, Object ctx) { Assert.assertNotEquals(ledgerId, -1L); }); } + + @Test(timeOut = 100000) + public void testReceiverQueueSize() throws Exception { + final String topicName = "persistent://my-property/use/my-ns/testReceiverQueueSize" + UUID.randomUUID(); + final String subName = "my-sub"; + final int receiveQueueSize = 1; + @Cleanup + PulsarClient client = newPulsarClient(lookupUrl.toString(), 100); + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i % 2)).value(String.valueOf(i)).sendAsync(); + } + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + ConsumerImpl consumer = (ConsumerImpl) client.newConsumer(Schema.STRING) + .topic(topicName).readCompacted(true).receiverQueueSize(receiveQueueSize).subscriptionName(subName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + //Give some time to consume + Awaitility.await() + .untilAsserted(() -> Assert.assertEquals(consumer.getStats().getMsgNumInReceiverQueue().intValue(), + receiveQueueSize)); + 
consumer.close(); + producer.close(); + } + + @Test + public void testDispatcherMaxReadSizeBytes() throws Exception { + final String topicName = + "persistent://my-property/use/my-ns/testDispatcherMaxReadSizeBytes" + UUID.randomUUID(); + final String subName = "my-sub"; + final int receiveQueueSize = 1; + @Cleanup + PulsarClient client = newPulsarClient(lookupUrl.toString(), 100); + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .topic(topicName).create(); + + for (int i = 0; i < 10; i+=2) { + producer.newMessage().key(UUID.randomUUID().toString()).value(new byte[4*1024*1024]).send(); + } + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + admin.topics().unload(topicName); + + + PersistentTopic topic = + (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, true, Map.of()).get().get(); + TopicCompactionService topicCompactionService = Mockito.spy(topic.getTopicCompactionService()); + FieldUtils.writeDeclaredField(topic, "topicCompactionService", topicCompactionService, true); + + ConsumerImpl consumer = (ConsumerImpl) client.newConsumer(Schema.BYTES) + .topic(topicName).readCompacted(true).receiverQueueSize(receiveQueueSize).subscriptionName(subName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + Awaitility.await().untilAsserted(() -> { + assertEquals(consumer.getStats().getMsgNumInReceiverQueue(), + 1); + }); + + Mockito.verify(topicCompactionService, Mockito.times(1)).readCompactedEntries(Mockito.any(), Mockito.same(1)); + + consumer.close(); + producer.close(); + } + + @Test + public void testCompactionDuplicate() throws Exception { + String topic = "persistent://my-property/use/my-ns/testCompactionDuplicate"; + final int numMessages = 1000; + final int maxKeys = 800; + + @Cleanup + Producer producer = 
pulsarClient.newProducer() + .topic(topic) + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + + // trigger compaction (create __compaction cursor) + admin.topics().triggerCompaction(topic); + + Map expected = new HashMap<>(); + Random r = new Random(0); + + pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close(); + + for (int j = 0; j < numMessages; j++) { + int keyIndex = r.nextInt(maxKeys); + String key = "key" + keyIndex; + byte[] data = ("my-message-" + key + "-" + j).getBytes(); + producer.newMessage().key(key).value(data).send(); + expected.put(key, data); + } + + producer.flush(); + + // trigger compaction + admin.topics().triggerCompaction(topic); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topic).status, + LongRunningProcessStatus.Status.RUNNING); + }); + + // Wait for phase one to complete + Thread.sleep(500); + + // Unload topic make reader of compaction reconnect + admin.topics().unload(topic); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topic, false); + // Compacted topic ledger should have same number of entry equals to number of unique key. 
+ Assert.assertEquals(internalStats.compactedLedger.entries, expected.size()); + Assert.assertTrue(internalStats.compactedLedger.ledgerId > -1); + Assert.assertFalse(internalStats.compactedLedger.offloaded); + }); + + // consumer with readCompacted enabled only get compacted entries + try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1") + .readCompacted(true).subscribe()) { + while (true) { + Message m = consumer.receive(2, TimeUnit.SECONDS); + Assert.assertEquals(expected.remove(m.getKey()), m.getData()); + if (expected.isEmpty()) { + break; + } + } + } + } + + @Test + public void testDeleteCompactedLedger() throws Exception { + String topicName = "persistent://my-property/use/my-ns/testDeleteCompactedLedger"; + + final String subName = "my-sub"; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + pulsarClient.newConsumer().topic(topicName).subscriptionName(subName).readCompacted(true).subscribe().close(); + + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i % 2)).value(String.valueOf(i)).sendAsync(); + } + producer.flush(); + + compact(topicName); + + MutableLong compactedLedgerId = new MutableLong(-1); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topicName); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1L); + compactedLedgerId.setValue(stats.compactedLedger.ledgerId); + Assert.assertEquals(stats.compactedLedger.entries, 2L); + }); + + // delete compacted ledger + admin.topics().deleteSubscription(topicName, "__compaction"); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topicName); + Assert.assertEquals(stats.compactedLedger.ledgerId, -1L); + Assert.assertEquals(stats.compactedLedger.entries, -1L); + assertThrows(BKException.BKNoSuchLedgerExistsException.class, () -> 
pulsarTestContext.getBookKeeperClient() + .openLedger(compactedLedgerId.getValue(), BookKeeper.DigestType.CRC32C, new byte[]{})); + }); + + compact(topicName); + + MutableLong compactedLedgerId2 = new MutableLong(-1); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topicName); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1L); + compactedLedgerId2.setValue(stats.compactedLedger.ledgerId); + Assert.assertEquals(stats.compactedLedger.entries, 2L); + }); + + producer.close(); + admin.topics().delete(topicName); + + Awaitility.await().untilAsserted(() -> assertThrows(BKException.BKNoSuchLedgerExistsException.class, + () -> pulsarTestContext.getBookKeeperClient().openLedger( + compactedLedgerId2.getValue(), BookKeeper.DigestType.CRC32, new byte[]{}))); + } + + @Test + public void testDeleteCompactedLedgerWithSlowAck() throws Exception { + // Disable topic level policies, since block ack thread may also block thread of delete topic policies. 
+ conf.setTopicLevelPoliciesEnabled(false); + restartBroker(); + + String topicName = "persistent://my-property/use/my-ns/testDeleteCompactedLedgerWithSlowAck"; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + pulsarClient.newConsumer().topic(topicName).subscriptionType(SubscriptionType.Exclusive) + .subscriptionName(Compactor.COMPACTION_SUBSCRIPTION) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).readCompacted(true).subscribe() + .close(); + + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i % 2)).value(String.valueOf(i)).sendAsync(); + } + producer.flush(); + + PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get(); + PersistentSubscription subscription = spy(topic.getSubscription(Compactor.COMPACTION_SUBSCRIPTION)); + topic.getSubscriptions().put(Compactor.COMPACTION_SUBSCRIPTION, subscription); + + AtomicLong compactedLedgerId = new AtomicLong(-1); + AtomicBoolean pauseAck = new AtomicBoolean(); + Mockito.doAnswer(invocationOnMock -> { + Map properties = (Map) invocationOnMock.getArguments()[2]; + log.info("acknowledgeMessage properties: {}", properties); + compactedLedgerId.set(properties.get(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY)); + pauseAck.set(true); + while (pauseAck.get()) { + Thread.sleep(200); + } + return invocationOnMock.callRealMethod(); + }).when(subscription).acknowledgeMessage(Mockito.any(), Mockito.eq( + CommandAck.AckType.Cumulative), Mockito.any()); + + admin.topics().triggerCompaction(topicName); + + while (!pauseAck.get()) { + Thread.sleep(100); + } + + CompletableFuture currentCompaction = + (CompletableFuture) FieldUtils.readDeclaredField(topic, "currentCompaction", true); + CompletableFuture spyCurrentCompaction = spy(currentCompaction); + FieldUtils.writeDeclaredField(topic, "currentCompaction", spyCurrentCompaction, true); + 
currentCompaction.whenComplete((obj, throwable) -> { + if (throwable != null) { + spyCurrentCompaction.completeExceptionally(throwable); + } else { + spyCurrentCompaction.complete(obj); + } + }); + Mockito.doAnswer(invocationOnMock -> { + pauseAck.set(false); + return invocationOnMock.callRealMethod(); + }).when(spyCurrentCompaction).handle(Mockito.any()); + + admin.topics().delete(topicName, true); + + Awaitility.await().untilAsserted(() -> assertThrows(BKException.BKNoSuchLedgerExistsException.class, + () -> pulsarTestContext.getBookKeeperClient().openLedger( + compactedLedgerId.get(), BookKeeper.DigestType.CRC32, new byte[]{}))); + } + + @Test + public void testCompactionWithTTL() throws Exception { + String topicName = "persistent://my-property/use/my-ns/testCompactionWithTTL"; + String subName = "sub"; + pulsarClient.newConsumer(Schema.STRING).topic(topicName).subscriptionName(subName).readCompacted(true) + .subscribe().close(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + producer.newMessage().key("K1").value("V1").send(); + producer.newMessage().key("K2").value("V2").send(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + producer.newMessage().key("K1").value("V3").send(); + producer.newMessage().key("K2").value("V4").send(); + + Thread.sleep(1000); + + // expire messages + admin.topics().expireMessagesForAllSubscriptions(topicName, 1); + + // trim the topic + admin.topics().unload(topicName); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topicName, false); + assertEquals(internalStats.numberOfEntries, 4); + }); + + producer.newMessage().key("K3").value("V5").send(); + + admin.topics().triggerCompaction(topicName); + + 
Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING).topic(topicName) + .subscriptionName("sub-2") + .readCompacted(true) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + List result = new ArrayList<>(); + while (true) { + Message receive = consumer.receive(2, TimeUnit.SECONDS); + if (receive == null) { + break; + } + + result.add(receive.getValue()); + } + + Assert.assertEquals(result, List.of("V3", "V4", "V5")); + } + + @Test + public void testAcknowledgeWithReconnection() throws Exception { + final String topicName = "persistent://my-property/use/my-ns/testAcknowledge" + UUID.randomUUID(); + final String subName = "my-sub"; + @Cleanup + PulsarClient client = newPulsarClient(lookupUrl.toString(), 100); + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + List expected = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i)).value(String.valueOf(i)).send(); + expected.add(String.valueOf(i)); + } + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + // trim the topic + admin.topics().unload(topicName); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topicName, false); + assertEquals(internalStats.numberOfEntries, 0); + }); + + ConsumerImpl consumer = (ConsumerImpl) client.newConsumer(Schema.STRING) + .topic(topicName).readCompacted(true).receiverQueueSize(1).subscriptionName(subName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .isAckReceiptEnabled(true) + 
.subscribe(); + + List results = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + Message message = consumer.receive(3, TimeUnit.SECONDS); + if (message == null) { + break; + } + results.add(message.getValue()); + consumer.acknowledge(message); + } + + Awaitility.await().untilAsserted(() -> + assertEquals(admin.topics().getStats(topicName, true).getSubscriptions().get(subName).getMsgBacklog(), + 5)); + + // Make consumer reconnect to broker + admin.topics().unload(topicName); + + // Wait for consumer to reconnect and clear incomingMessages + consumer.pause(); + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(consumer.numMessagesInQueue(), 0); + }); + consumer.resume(); + + for (int i = 0; i < 5; i++) { + Message message = consumer.receive(3, TimeUnit.SECONDS); + if (message == null) { + break; + } + results.add(message.getValue()); + consumer.acknowledge(message); + } + + Awaitility.await().untilAsserted(() -> + assertEquals(admin.topics().getStats(topicName, true).getSubscriptions().get(subName).getMsgBacklog(), + 0)); + + Assert.assertEquals(results, expected); + + Message message = consumer.receive(3, TimeUnit.SECONDS); + Assert.assertNull(message); + + // Make consumer reconnect to broker + admin.topics().unload(topicName); + + producer.newMessage().key("K").value("V").send(); + Message message2 = consumer.receive(3, TimeUnit.SECONDS); + Assert.assertEquals(message2.getValue(), "V"); + consumer.acknowledge(message2); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topicName); + Assert.assertEquals(internalStats.lastConfirmedEntry, + internalStats.cursors.get(subName).markDeletePosition); + }); + + consumer.close(); + producer.close(); + } + + @Test + public void testEarliestSubsAfterRollover() throws Exception { + final String topicName = "persistent://my-property/use/my-ns/testEarliestSubsAfterRollover" + UUID.randomUUID(); + final String subName = "my-sub"; + 
@Cleanup + PulsarClient client = newPulsarClient(lookupUrl.toString(), 100); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false).topic(topicName).create(); + + List expected = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + producer.newMessage().key(String.valueOf(i)).value(String.valueOf(i)).send(); + expected.add(String.valueOf(i)); + } + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + // trim the topic + admin.topics().unload(topicName); + + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topicName, false); + assertEquals(internalStats.numberOfEntries, 0); + }); + + // Make ml.getFirstPosition() return new ledger first position + producer.newMessage().key("K").value("V").send(); + expected.add("V"); + + @Cleanup + ConsumerImpl consumer = (ConsumerImpl) client.newConsumer(Schema.STRING) + .topic(topicName).readCompacted(true).receiverQueueSize(1).subscriptionName(subName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .isAckReceiptEnabled(true) + .subscribe(); + + List results = new ArrayList<>(); + while (true) { + Message message = consumer.receive(3, TimeUnit.SECONDS); + if (message == null) { + break; + } + + results.add(message.getValue()); + consumer.acknowledge(message); + } + + Assert.assertEquals(results, expected); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java index e86be6a4db816..71700ef83a443 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactorTest.java @@ -19,13 +19,12 @@ package 
org.apache.pulsar.compaction; import static org.apache.pulsar.client.impl.RawReaderTest.extractKey; - +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import io.netty.buffer.ByteBuf; - import java.util.ArrayList; import java.util.Enumeration; import java.util.HashMap; @@ -33,23 +32,38 @@ import java.util.Map; import java.util.Optional; import java.util.Random; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.LedgerEntry; import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.mledger.Entry; +import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.admin.LongRunningProcessStatus; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.RawMessage; +import org.apache.pulsar.client.api.Reader; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.RawMessageImpl; import org.apache.pulsar.common.policies.data.ClusterData; -import 
org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.protocol.Commands; +import org.awaitility.Awaitility; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -65,7 +79,6 @@ public class CompactorTest extends MockedPulsarServiceBaseTest { protected Compactor compactor; - @BeforeMethod @Override public void setup() throws Exception { @@ -101,16 +114,17 @@ protected Compactor getCompactor() { return compactor; } - private List compactAndVerify(String topic, Map expected, boolean checkMetrics) throws Exception { + private List compactAndVerify(String topic, Map expected, boolean checkMetrics) + throws Exception { long compactedLedgerId = compact(topic); LedgerHandle ledger = bk.openLedger(compactedLedgerId, - Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, - Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD); + Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, + Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD); Assert.assertEquals(ledger.getLastAddConfirmed() + 1, // 0..lac - expected.size(), - "Should have as many entries as there is keys"); + expected.size(), + "Should have as many entries as there is keys"); List keys = new ArrayList<>(); Enumeration entries = ledger.readEntries(0, ledger.getLastAddConfirmed()); @@ -124,7 +138,7 @@ private List compactAndVerify(String topic, Map expected byte[] bytes = new byte[payload.readableBytes()]; payload.readBytes(bytes); Assert.assertEquals(bytes, expected.remove(key), - "Compacted version should match expected version"); + "Compacted version should match expected version"); m.close(); } if (checkMetrics) { @@ -148,17 +162,18 @@ public void testCompaction() throws Exception { final int numMessages = 1000; final int maxKeys = 10; + @Cleanup Producer producer = pulsarClient.newProducer().topic(topic) - .enableBatching(false) - .messageRoutingMode(MessageRoutingMode.SinglePartition) - .create(); + 
.enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); Map expected = new HashMap<>(); Random r = new Random(0); for (int j = 0; j < numMessages; j++) { int keyIndex = r.nextInt(maxKeys); - String key = "key"+keyIndex; + String key = "key" + keyIndex; byte[] data = ("my-message-" + key + "-" + j).getBytes(); producer.newMessage() .key(key) @@ -169,14 +184,64 @@ public void testCompaction() throws Exception { compactAndVerify(topic, expected, true); } + @Test + public void testAllCompactedOut() throws Exception { + String topicName = "persistent://my-property/use/my-ns/testAllCompactedOut"; + // set retain null key to true + boolean oldRetainNullKey = pulsar.getConfig().isTopicCompactionRetainNullKey(); + pulsar.getConfig().setTopicCompactionRetainNullKey(true); + this.restartBroker(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(true).topic(topicName).batchingMaxMessages(3).create(); + + producer.newMessage().key("K1").value("V1").sendAsync(); + producer.newMessage().key("K2").value("V2").sendAsync(); + producer.newMessage().key("K2").value(null).sendAsync(); + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + producer.newMessage().key("K1").value(null).sendAsync(); + producer.flush(); + + admin.topics().triggerCompaction(topicName); + + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(admin.topics().compactionStatus(topicName).status, + LongRunningProcessStatus.Status.SUCCESS); + }); + + @Cleanup + Reader reader = pulsarClient.newReader(Schema.STRING) + .subscriptionName("reader-test") + .topic(topicName) + .readCompacted(true) + .startMessageId(MessageId.earliest) + .create(); + while (reader.hasMessageAvailable()) { + Message message = reader.readNext(3, TimeUnit.SECONDS); + 
Assert.assertNotNull(message); + } + // set retain null key back to avoid affecting other tests + pulsar.getConfig().setTopicCompactionRetainNullKey(oldRetainNullKey); + } + @Test public void testCompactAddCompact() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; + @Cleanup Producer producer = pulsarClient.newProducer().topic(topic) - .enableBatching(false) - .messageRoutingMode(MessageRoutingMode.SinglePartition) - .create(); + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); Map expected = new HashMap<>(); @@ -210,10 +275,11 @@ public void testCompactAddCompact() throws Exception { public void testCompactedInOrder() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; + @Cleanup Producer producer = pulsarClient.newProducer().topic(topic) - .enableBatching(false) - .messageRoutingMode(MessageRoutingMode.SinglePartition) - .create(); + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); producer.newMessage() .key("c") @@ -251,11 +317,56 @@ public void testCompactEmptyTopic() throws Exception { public void testPhaseOneLoopTimeConfiguration() { ServiceConfiguration configuration = new ServiceConfiguration(); configuration.setBrokerServiceCompactionPhaseOneLoopTimeInSeconds(60); - TwoPhaseCompactor compactor = new TwoPhaseCompactor(configuration, Mockito.mock(PulsarClientImpl.class), + PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); + TwoPhaseCompactor compactor = new TwoPhaseCompactor(configuration, mockClient, Mockito.mock(BookKeeper.class), compactionScheduler); Assert.assertEquals(compactor.getPhaseOneLoopReadTimeoutInSeconds(), 60); } + @Test + public void testCompactedWithConcurrentSend() throws Exception { + String topic = 
"persistent://my-property/use/my-ns/testCompactedWithConcurrentSend"; + + @Cleanup + Producer producer = pulsarClient.newProducer().topic(topic) + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + + var future = CompletableFuture.runAsync(() -> { + for (int i = 0; i < 100; i++) { + try { + producer.newMessage().key(String.valueOf(i)).value(String.valueOf(i).getBytes()).send(); + } catch (PulsarClientException e) { + throw new RuntimeException(e); + } + } + }); + + PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic).get(); + PulsarTopicCompactionService topicCompactionService = + (PulsarTopicCompactionService) persistentTopic.getTopicCompactionService(); + + Awaitility.await().untilAsserted(() -> { + long compactedLedgerId = compact(topic); + Thread.sleep(300); + Optional compactedTopicContext = topicCompactionService.getCompactedTopic() + .getCompactedTopicContext(); + Assert.assertTrue(compactedTopicContext.isPresent()); + Assert.assertEquals(compactedTopicContext.get().ledger.getId(), compactedLedgerId); + }); + + Position lastCompactedPosition = topicCompactionService.getLastCompactedPosition().get(); + Entry lastCompactedEntry = topicCompactionService.readLastCompactedEntry().get(); + + Assert.assertTrue(PositionImpl.get(lastCompactedPosition.getLedgerId(), lastCompactedPosition.getEntryId()) + .compareTo(lastCompactedEntry.getLedgerId(), lastCompactedEntry.getEntryId()) >= 0); + + future.join(); + } + public ByteBuf extractPayload(RawMessage m) throws Exception { ByteBuf payloadAndMetadata = m.getHeadersAndPayload(); Commands.skipChecksumIfPresent(payloadAndMetadata); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java index 317b1a227e585..6c2d848bb7c2d 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/GetLastMessageIdCompactedTest.java @@ -20,6 +20,8 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotEquals; + import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -32,6 +34,7 @@ import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerBuilder; @@ -415,4 +418,28 @@ public void testGetLastMessageIdAfterCompactionAllNullMsg(boolean enabledBatch) producer.close(); admin.topics().delete(topicName, false); } + + @Test(dataProvider = "enabledBatch") + public void testReaderStuckWithCompaction(boolean enabledBatch) throws Exception { + String topicName = "persistent://public/default/" + BrokerTestUtil.newUniqueName("tp"); + String subName = "sub"; + Producer producer = createProducer(enabledBatch, topicName); + producer.newMessage().key("k0").value("v0").sendAsync(); + producer.newMessage().key("k0").value("v1").sendAsync(); + producer.flush(); + + triggerCompactionAndWait(topicName); + triggerLedgerSwitch(topicName); + clearAllTheLedgersOutdated(topicName); + + var reader = pulsarClient.newReader(Schema.STRING) + .topic(topicName) + .subscriptionName(subName) + .startMessageId(MessageId.earliest) + .create(); + while (reader.hasMessageAvailable()) { + Message message = reader.readNext(5, TimeUnit.SECONDS); + assertNotEquals(message, null); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java index e4f0750a981c9..09c3ebe419394 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/ServiceUnitStateCompactionTest.java @@ -168,7 +168,7 @@ public void setup() throws Exception { @Override public void cleanup() throws Exception { super.internalCleanup(); - + bk.close(); if (compactionScheduler != null) { compactionScheduler.shutdownNow(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java index 1cac04c2fa956..e556ec8e0b200 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionRetentionTest.java @@ -34,7 +34,7 @@ public class StrategicCompactionRetentionTest extends CompactionRetentionTest { @Override public void setup() throws Exception { super.setup(); - compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler, 1); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); strategy = new TopicCompactionStrategyTest.DummyTopicCompactionStrategy(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java index 135a839bd54a8..54563431052eb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactionTest.java @@ -18,22 +18,33 @@ */ package org.apache.pulsar.compaction; +import static 
org.apache.pulsar.broker.loadbalance.extensions.channel.ServiceUnitStateChannelImpl.MSG_COMPRESSION_TYPE; +import static org.testng.Assert.assertEquals; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.CryptoKeyReader; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerBuilder; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.TableView; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.topics.TopicCompactionStrategy; +import org.apache.pulsar.common.util.FutureUtil; import org.testng.Assert; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -47,7 +58,7 @@ public class StrategicCompactionTest extends CompactionTest { @Override public void setup() throws Exception { super.setup(); - compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler, 1); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); strategy = new TopicCompactionStrategyTest.DummyTopicCompactionStrategy(); } @@ -148,5 +159,58 @@ public void testNumericOrderCompaction() throws Exception { Assert.assertEquals(tableView.entrySet(), expectedCopy.entrySet()); } + @Test(timeOut = 20000) + public void testSameBatchCompactToSameBatch() throws Exception { + final String 
topic = + "persistent://my-property/use/my-ns/testSameBatchCompactToSameBatch" + UUID.randomUUID(); + + // Use odd number to make sure the last message is flush by `reader.hasNext() == false`. + final int messages = 11; + + // 1.create producer and publish message to the topic. + ProducerBuilder builder = pulsarClient.newProducer(Schema.INT32) + .compressionType(MSG_COMPRESSION_TYPE).topic(topic); + builder.batchingMaxMessages(2) + .batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS); + + Producer producer = builder.create(); + + List> futures = new ArrayList<>(messages); + for (int i = 0; i < messages; i++) { + futures.add(producer.newMessage().key(String.valueOf(i)) + .value(i) + .sendAsync()); + } + FutureUtil.waitForAll(futures).get(); + + // 2.compact the topic. + StrategicTwoPhaseCompactor compactor + = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + compactor.compact(topic, strategy).get(); + // consumer with readCompacted enabled only get compacted entries + try (Consumer consumer = pulsarClient + .newConsumer(Schema.INT32) + .topic(topic) + .subscriptionName("sub1") + .readCompacted(true) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe()) { + int received = 0; + while (true) { + Message m = consumer.receive(2, TimeUnit.SECONDS); + if (m == null) { + break; + } + MessageIdAdv messageId = (MessageIdAdv) m.getMessageId(); + if (received < messages - 1) { + assertEquals(messageId.getBatchSize(), 2); + } else { + assertEquals(messageId.getBatchSize(), 0); + } + received++; + } + assertEquals(received, messages); + } + + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java index 91dd8a2bd358b..bc65791b323cd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/StrategicCompactorTest.java @@ -33,7 +33,7 @@ public class StrategicCompactorTest extends CompactorTest { @Override public void setup() throws Exception { super.setup(); - compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler, 1); + compactor = new StrategicTwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); strategy = new TopicCompactionStrategyTest.DummyTopicCompactionStrategy(); } @@ -46,4 +46,4 @@ protected long compact(String topic) throws ExecutionException, InterruptedExcep protected Compactor getCompactor() { return compactor; } -} \ No newline at end of file +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionServiceTest.java index 5810e0180d07c..4abe00fb0c631 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/TopicCompactionServiceTest.java @@ -21,14 +21,20 @@ import static org.apache.pulsar.compaction.Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY; import static org.apache.pulsar.compaction.Compactor.COMPACTION_SUBSCRIPTION; import static org.testng.Assert.assertEquals; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.IOException; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import lombok.Cleanup; +import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.admin.PulsarAdminException; import 
org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; @@ -36,12 +42,21 @@ import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -public class TopicCompactionServiceTest extends CompactorTest { +public class TopicCompactionServiceTest extends MockedPulsarServiceBaseTest { + + protected ScheduledExecutorService compactionScheduler; + protected BookKeeper bk; + private TwoPhaseCompactor compactor; + + @BeforeMethod + @Override + public void setup() throws Exception { + super.internalSetup(); - @Test - public void test() throws PulsarClientException, PulsarAdminException { admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); TenantInfoImpl tenantInfo = new TenantInfoImpl(Set.of("role1", "role2"), Set.of("test")); String defaultTenant = "prop-xyz"; @@ -49,6 +64,25 @@ public void test() throws PulsarClientException, PulsarAdminException { String defaultNamespace = defaultTenant + "/ns1"; admin.namespaces().createNamespace(defaultNamespace, Set.of("test")); + compactionScheduler = Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder().setNameFormat("compactor").setDaemon(true).build()); + bk = pulsar.getBookKeeperClientFactory().create( + this.conf, null, null, Optional.empty(), null); + compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); + } + + @AfterMethod(alwaysRun = true) + @Override + public void cleanup() throws Exception { + super.internalCleanup(); + bk.close(); + if (compactionScheduler != null) { + compactionScheduler.shutdownNow(); + } + } + + @Test + public void test() throws PulsarClientException, PulsarAdminException { String topic = "persistent://prop-xyz/ns1/my-topic"; 
PulsarTopicCompactionService service = new PulsarTopicCompactionService(topic, bk, () -> compactor); @@ -114,5 +148,8 @@ public void test() throws PulsarClientException, PulsarAdminException { assertEquals(data, "B_3"); } }); + + List entries2 = service.readCompactedEntries(PositionImpl.EARLIEST, 1).join(); + assertEquals(entries2.size(), 1); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java index 714c9d7269970..cbf2f28b0b50b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java @@ -34,6 +34,7 @@ import java.net.URL; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; @@ -267,8 +268,12 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setAuthorizationEnabled(config.isAuthorizationEnabled()); workerConfig.setAuthorizationProvider(config.getAuthorizationProvider()); + List urlPatterns = + List.of(getPulsarApiExamplesJar().getParentFile().toURI() + ".*", "http://127\\.0\\.0\\.1:.*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); + PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java index c820f512a68de..72e6a3766aeee 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java @@ -268,8 +268,11 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setAuthenticationEnabled(true); workerConfig.setAuthorizationEnabled(true); + List urlPatterns = List.of(getPulsarApiExamplesJar().getParentFile().toURI() + ".*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); + PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java index 1e8b26beee38a..3508cf0bfc7e6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionTlsTest.java @@ -20,6 +20,8 @@ import static org.apache.pulsar.common.util.PortManager.nextLockedFreePort; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Sets; @@ -30,9 +32,11 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; @@ -41,6 +45,7 @@ import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.impl.auth.AuthenticationTls; import 
org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.WorkerInfo; import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.ObjectMapperFactory; @@ -149,6 +154,12 @@ void setup() throws Exception { workerConfig.setUseTls(true); workerConfig.setTlsEnableHostnameVerification(true); workerConfig.setTlsAllowInsecureConnection(false); + File packagePath = new File( + PulsarSink.class.getProtectionDomain().getCodeSource().getLocation().getPath()).getParentFile(); + List urlPatterns = + List.of(packagePath.toURI() + ".*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); fnWorkerServices[i] = WorkerServiceLoader.load(workerConfig); configurations[i] = config; @@ -242,6 +253,18 @@ public void testFunctionsCreation() throws Exception { log.info(" -------- Start test function : {}", functionName); + int finalI = i; + Awaitility.await().atMost(1, TimeUnit.MINUTES).pollInterval(1, TimeUnit.SECONDS).untilAsserted(() -> { + final PulsarWorkerService workerService = ((PulsarWorkerService) fnWorkerServices[finalI]); + final LeaderService leaderService = workerService.getLeaderService(); + assertNotNull(leaderService); + if (leaderService.isLeader()) { + assertTrue(true); + } else { + final WorkerInfo workerInfo = workerService.getMembershipManager().getLeader(); + assertTrue(workerInfo != null && !workerInfo.getWorkerId().equals(workerService.getWorkerConfig().getWorkerId())); + } + }); pulsarAdmins[i].functions().createFunctionWithUrl( functionConfig, jarFilePathUrl ); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java index 0821974bea506..6226fa904885c 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java @@ -174,7 +174,6 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setTopicCompactionFrequencySec(1); PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java index 3a99cc647ed5c..3c0dd0822b7dc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java @@ -35,6 +35,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -306,8 +307,12 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setAuthenticationEnabled(true); workerConfig.setAuthorizationEnabled(true); + List urlPatterns = + List.of(getPulsarApiExamplesJar().getParentFile().toURI() + ".*", "http://127\\.0\\.0\\.1:.*"); + workerConfig.setAdditionalEnabledConnectorUrlPatterns(urlPatterns); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(urlPatterns); + PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java index d31d0c66bdf93..22b9ad0df3a69 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java @@ -211,7 +211,6 @@ private PulsarWorkerService createPulsarFunctionWorker(ServiceConfiguration conf workerConfig.setTlsTrustCertsFilePath(TLS_CLIENT_CERT_FILE_PATH); PulsarWorkerService workerService = new PulsarWorkerService(); - workerService.init(workerConfig, null, false); return workerService; } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java index 140dea9e7ebc7..49517a424b936 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java @@ -407,7 +407,7 @@ public void testSchemaComparison() throws Exception { assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(namespaceName.toString()), SchemaCompatibilityStrategy.UNDEFINED); byte[] changeSchemaBytes = (new String(Schema.AVRO(Schemas.PersonOne.class) - .getSchemaInfo().getSchema(), UTF_8) + "/n /n /n").getBytes(); + .getSchemaInfo().getSchema(), UTF_8) + "\n \n \n").getBytes(); SchemaInfo schemaInfo = SchemaInfo.builder().type(SchemaType.AVRO).schema(changeSchemaBytes).build(); admin.schemas().createSchema(fqtn, schemaInfo); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/security/MockedPulsarStandalone.java b/pulsar-broker/src/test/java/org/apache/pulsar/security/MockedPulsarStandalone.java new file mode 100644 index 0000000000000..20dd2e1066a87 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/security/MockedPulsarStandalone.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.security; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Sets; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.SignatureAlgorithm; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import javax.crypto.SecretKey; +import lombok.Getter; +import lombok.SneakyThrows; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider; +import org.apache.pulsar.broker.testcontext.PulsarTestContext; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.common.util.ObjectMapperFactory; + + +public abstract class MockedPulsarStandalone implements AutoCloseable { + + @Getter + private final ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); + private PulsarTestContext pulsarTestContext; + + @Getter + private PulsarService pulsarService; + private PulsarAdmin 
serviceInternalAdmin; + + + { + serviceConfiguration.setClusterName(TEST_CLUSTER_NAME); + serviceConfiguration.setBrokerShutdownTimeoutMs(0L); + serviceConfiguration.setBrokerServicePort(Optional.of(0)); + serviceConfiguration.setBrokerServicePortTls(Optional.of(0)); + serviceConfiguration.setAdvertisedAddress("localhost"); + serviceConfiguration.setWebServicePort(Optional.of(0)); + serviceConfiguration.setWebServicePortTls(Optional.of(0)); + serviceConfiguration.setNumExecutorThreadPoolSize(5); + serviceConfiguration.setExposeBundlesMetricsInPrometheus(true); + } + + + protected static final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + + private static final String BROKER_INTERNAL_CLIENT_SUBJECT = "broker_internal"; + private static final String BROKER_INTERNAL_CLIENT_TOKEN = Jwts.builder() + .claim("sub", BROKER_INTERNAL_CLIENT_SUBJECT).signWith(SECRET_KEY).compact(); + protected static final String SUPER_USER_SUBJECT = "super-user"; + protected static final String SUPER_USER_TOKEN = Jwts.builder() + .claim("sub", SUPER_USER_SUBJECT).signWith(SECRET_KEY).compact(); + protected static final String NOBODY_SUBJECT = "nobody"; + protected static final String NOBODY_TOKEN = Jwts.builder() + .claim("sub", NOBODY_SUBJECT).signWith(SECRET_KEY).compact(); + + + @SneakyThrows + protected void configureTokenAuthentication() { + serviceConfiguration.setAuthenticationEnabled(true); + serviceConfiguration.setAuthenticationProviders(Set.of(AuthenticationProviderToken.class.getName())); + // internal client + serviceConfiguration.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + final Map brokerClientAuthParams = new HashMap<>(); + brokerClientAuthParams.put("token", BROKER_INTERNAL_CLIENT_TOKEN); + final String brokerClientAuthParamStr = MAPPER.writeValueAsString(brokerClientAuthParams); + serviceConfiguration.setBrokerClientAuthenticationParameters(brokerClientAuthParamStr); + + Properties properties = 
serviceConfiguration.getProperties(); + if (properties == null) { + properties = new Properties(); + serviceConfiguration.setProperties(properties); + } + properties.put("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(SECRET_KEY)); + + } + + + + protected void configureDefaultAuthorization() { + serviceConfiguration.setAuthorizationEnabled(true); + serviceConfiguration.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); + serviceConfiguration.setSuperUserRoles(Set.of(SUPER_USER_SUBJECT, BROKER_INTERNAL_CLIENT_SUBJECT)); + } + + + @SneakyThrows + protected void start() { + this.pulsarTestContext = PulsarTestContext.builder() + .spyByDefault() + .config(serviceConfiguration) + .withMockZookeeper(false) + .build(); + this.pulsarService = pulsarTestContext.getPulsarService(); + this.serviceInternalAdmin = pulsarService.getAdminClient(); + setupDefaultTenantAndNamespace(); + } + + private void setupDefaultTenantAndNamespace() throws Exception { + if (!serviceInternalAdmin.clusters().getClusters().contains(TEST_CLUSTER_NAME)) { + serviceInternalAdmin.clusters().createCluster(TEST_CLUSTER_NAME, + ClusterData.builder().serviceUrl(pulsarService.getWebServiceAddress()).build()); + } + if (!serviceInternalAdmin.tenants().getTenants().contains(DEFAULT_TENANT)) { + serviceInternalAdmin.tenants().createTenant(DEFAULT_TENANT, TenantInfo.builder().allowedClusters( + Sets.newHashSet(TEST_CLUSTER_NAME)).build()); + } + if (!serviceInternalAdmin.namespaces().getNamespaces(DEFAULT_TENANT).contains(DEFAULT_NAMESPACE)) { + serviceInternalAdmin.namespaces().createNamespace(DEFAULT_NAMESPACE); + } + } + + + @Override + public void close() throws Exception { + if (pulsarTestContext != null) { + pulsarTestContext.close(); + } + } + + // Utils + protected static final ObjectMapper mapper = new ObjectMapper(); + + // Static name + private static final String DEFAULT_TENANT = "public"; + private static final String DEFAULT_NAMESPACE = "public/default"; + private static 
final String TEST_CLUSTER_NAME = "test-standalone"; + + private static final ObjectMapper MAPPER = ObjectMapperFactory.getMapper().getObjectMapper(); +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java index 92899feda7371..a4bc69a7266cc 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/zookeeper/LocalBookkeeperEnsembleTest.java @@ -21,10 +21,6 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; - -import java.util.Collections; -import java.util.List; - import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -39,22 +35,6 @@ void setup() throws Exception { void teardown() throws Exception { } - @Test - public void testAdvertisedAddress() throws Exception { - final int numBk = 1; - - LocalBookkeeperEnsemble ensemble = new LocalBookkeeperEnsemble( - numBk, 0, 0, null, null, true, "127.0.0.2"); - ensemble.startStandalone(); - - List bookies = ensemble.getZkClient().getChildren("/ledgers/available", false); - Collections.sort(bookies); - assertEquals(bookies.size(), 2); - assertTrue(bookies.get(0).startsWith("127.0.0.2:")); - - ensemble.stop(); - } - @Test public void testStartStop() throws Exception { diff --git a/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf b/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf index d9411e655ad5b..4e2fd40298354 100644 --- a/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf +++ b/pulsar-broker/src/test/resources/configurations/standalone_no_client_auth.conf @@ -29,4 +29,5 @@ authenticationEnabled=true 
authenticationProviders=org.apache.pulsar.MockTokenAuthenticationProvider brokerClientAuthenticationPlugin= brokerClientAuthenticationParameters= -loadBalancerOverrideBrokerNicSpeedGbps=2 \ No newline at end of file +loadBalancerOverrideBrokerNicSpeedGbps=2 +topicLevelPoliciesEnabled=false \ No newline at end of file diff --git a/pulsar-client-1x-base/pom.xml b/pulsar-client-1x-base/pom.xml index 41de4d2e06bde..b847e5f53cb85 100644 --- a/pulsar-client-1x-base/pom.xml +++ b/pulsar-client-1x-base/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-client-1x-base Pulsar Client 1.x Compatibility Base pom - pulsar-client-2x-shaded pulsar-client-1x - @@ -61,7 +57,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -77,5 +72,4 @@ - diff --git a/pulsar-client-1x-base/pulsar-client-1x/pom.xml b/pulsar-client-1x-base/pulsar-client-1x/pom.xml index 307a1d76f7490..95e902fdf770c 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/pom.xml +++ b/pulsar-client-1x-base/pulsar-client-1x/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-client-1x-base 3.1.0-SNAPSHOT .. 
- pulsar-client-1x Pulsar Client 1.x Compatibility API - ${project.groupId} pulsar-client-2x-shaded ${project.version} - com.google.guava guava - org.apache.commons commons-lang3 - - @@ -70,7 +63,6 @@ - com.github.spotbugs spotbugs-maven-plugin @@ -90,5 +82,4 @@ - diff --git a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java index 3b0efe64cf588..ea2bba166e6c5 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ClientConfiguration.java @@ -368,6 +368,7 @@ public ClientConfiguration setServiceUrl(String serviceUrl) { * @param unit the time unit in which the duration is defined */ public void setConnectionTimeout(int duration, TimeUnit unit) { + checkArgument(duration >= 0); confData.setConnectionTimeoutMs((int) unit.toMillis(duration)); } diff --git a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java index 81956db56f774..f2101b287043c 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/java/org/apache/pulsar/client/api/ConsumerConfiguration.java @@ -69,6 +69,7 @@ public long getAckTimeoutMillis() { * @return {@link ConsumerConfiguration} */ public ConsumerConfiguration setAckTimeout(long ackTimeout, TimeUnit timeUnit) { + checkArgument(ackTimeout >= 0); long ackTimeoutMillis = timeUnit.toMillis(ackTimeout); checkArgument(ackTimeoutMillis >= minAckTimeoutMillis, "Ack timeout should be should be greater than " + minAckTimeoutMillis + " ms"); diff --git 
a/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml b/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml index 7938e60bf4330..d191e191d52ca 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml +++ b/pulsar-client-1x-base/pulsar-client-1x/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-client-1x-base 3.1.0-SNAPSHOT .. - pulsar-client-2x-shaded Pulsar Client 2.x Shaded API - ${project.groupId} @@ -39,10 +36,9 @@ ${project.version} - - + org.apache.maven.plugins maven-shade-plugin @@ -56,16 +52,15 @@ true true false - - org.apache.pulsar:pulsar-client - org.apache.pulsar:pulsar-client-api + io.streamnative:pulsar-client + io.streamnative:pulsar-client-api - org.apache.pulsar:pulsar-client + io.streamnative:pulsar-client ** @@ -93,6 +88,30 @@ + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/pulsar-client-admin-api/pom.xml b/pulsar-client-admin-api/pom.xml index be666085c2c5f..1684e0479128a 100644 --- a/pulsar-client-admin-api/pom.xml +++ b/pulsar-client-admin-api/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - - org.apache.pulsar - pulsar - 3.1.0-SNAPSHOT - .. - - - pulsar-client-admin-api - Pulsar Client Admin :: API - - - - ${project.groupId} - pulsar-client-api - ${project.version} - - - - org.slf4j - slf4j-api - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - ${pulsar.client.compiler.release} - - - - org.gaul - modernizer-maven-plugin - - true - 8 - - - - modernizer - verify - - modernizer - - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - + + 4.0.0 + + io.streamnative + pulsar + 3.1.0-SNAPSHOT + .. 
+ + pulsar-client-admin-api + Pulsar Client Admin :: API + + + ${project.groupId} + pulsar-client-api + ${project.version} + + + org.slf4j + slf4j-api + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + ${pulsar.client.compiler.release} + + + + org.gaul + modernizer-maven-plugin + + true + 8 + + + + modernizer + verify + + modernizer + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java index 29c280f8ba536..dc0b7c9885a9a 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Brokers.java @@ -35,7 +35,7 @@ public interface Brokers { /** * Get the list of active brokers in the local cluster. *

- * Get the list of active brokers (web service addresses) in the local cluster. + * Get the list of active brokers (broker ids) in the local cluster. *

* Response Example: * @@ -44,7 +44,7 @@ public interface Brokers { * * * "prod1-broker3.messaging.use.example.com:8080"] * * - * @return a list of (host:port) + * @return a list of broker ids * @throws NotAuthorizedException * You don't have admin permission to get the list of active brokers in the cluster * @throws PulsarAdminException @@ -55,7 +55,7 @@ public interface Brokers { /** * Get the list of active brokers in the local cluster asynchronously. *

- * Get the list of active brokers (web service addresses) in the local cluster. + * Get the list of active brokers (broker ids) in the local cluster. *

* Response Example: * @@ -64,13 +64,13 @@ public interface Brokers { * "prod1-broker3.messaging.use.example.com:8080"] * * - * @return a list of (host:port) + * @return a list of broker ids */ CompletableFuture> getActiveBrokersAsync(); /** * Get the list of active brokers in the cluster. *

- * Get the list of active brokers (web service addresses) in the cluster. + * Get the list of active brokers (broker ids) in the cluster. *

* Response Example: * @@ -81,7 +81,7 @@ public interface Brokers { * * @param cluster * Cluster name - * @return a list of (host:port) + * @return a list of broker ids * @throws NotAuthorizedException * You don't have admin permission to get the list of active brokers in the cluster * @throws NotFoundException @@ -94,7 +94,7 @@ public interface Brokers { /** * Get the list of active brokers in the cluster asynchronously. *

- * Get the list of active brokers (web service addresses) in the cluster. + * Get the list of active brokers (broker ids) in the cluster. *

* Response Example: * @@ -105,7 +105,7 @@ public interface Brokers { * * @param cluster * Cluster name - * @return a list of (host:port) + * @return a list of broker ids */ CompletableFuture> getActiveBrokersAsync(String cluster); @@ -156,11 +156,11 @@ public interface Brokers { * * * @param cluster - * @param brokerUrl + * @param brokerId * @return * @throws PulsarAdminException */ - Map getOwnedNamespaces(String cluster, String brokerUrl) + Map getOwnedNamespaces(String cluster, String brokerId) throws PulsarAdminException; /** @@ -176,10 +176,10 @@ Map getOwnedNamespaces(String cluster, String * * * @param cluster - * @param brokerUrl + * @param brokerId * @return */ - CompletableFuture> getOwnedNamespacesAsync(String cluster, String brokerUrl); + CompletableFuture> getOwnedNamespacesAsync(String cluster, String brokerId); /** * Update a dynamic configuration value into ZooKeeper. diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java index 8955fe7a0ac78..19e9ff2d15a2b 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BrokerInfo.java @@ -25,9 +25,11 @@ */ public interface BrokerInfo { String getServiceUrl(); + String getBrokerId(); interface Builder { Builder serviceUrl(String serviceUrl); + Builder brokerId(String brokerId); BrokerInfo build(); } diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java index e4d0a68b50ad0..d77f693c7cd70 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java +++ 
b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BrokerInfoImpl.java @@ -31,6 +31,7 @@ @NoArgsConstructor public final class BrokerInfoImpl implements BrokerInfo { private String serviceUrl; + private String brokerId; public static BrokerInfoImplBuilder builder() { return new BrokerInfoImplBuilder(); @@ -38,14 +39,20 @@ public static BrokerInfoImplBuilder builder() { public static class BrokerInfoImplBuilder implements BrokerInfo.Builder { private String serviceUrl; + private String brokerId; public BrokerInfoImplBuilder serviceUrl(String serviceUrl) { this.serviceUrl = serviceUrl; return this; } + public BrokerInfoImplBuilder brokerId(String brokerId) { + this.brokerId = brokerId; + return this; + } + public BrokerInfoImpl build() { - return new BrokerInfoImpl(serviceUrl); + return new BrokerInfoImpl(serviceUrl, brokerId); } } } diff --git a/pulsar-client-admin-shaded/pom.xml b/pulsar-client-admin-shaded/pom.xml index 4aaefe3a275b5..5e536ceebb75d 100644 --- a/pulsar-client-admin-shaded/pom.xml +++ b/pulsar-client-admin-shaded/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- pulsar-client-admin Pulsar Client Admin - ${project.groupId} @@ -74,7 +70,6 @@ - maven-antrun-plugin @@ -87,15 +82,12 @@ - + - org.apache.maven.plugins maven-shade-plugin @@ -108,11 +100,10 @@ true true - - org.apache.pulsar:pulsar-client-original - org.apache.pulsar:pulsar-client-admin-original + io.streamnative:pulsar-client-original + io.streamnative:pulsar-client-admin-original org.apache.commons:commons-lang3 commons-codec:commons-codec commons-collections:commons-collections @@ -126,7 +117,7 @@ com.fasterxml.jackson.*:* io.netty:* io.netty.incubator:* - org.apache.pulsar:pulsar-common + io.streamnative:pulsar-common org.apache.bookkeeper:* com.yahoo.datasketches:sketches-core org.glassfish.jersey*:* @@ -148,7 +139,7 @@ org.yaml:snakeyaml io.swagger:* - org.apache.pulsar:pulsar-client-messagecrypto-bc + io.streamnative:pulsar-client-messagecrypto-bc com.fasterxml.jackson.core:jackson-annotations @@ -156,7 +147,7 @@ - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original ** @@ -166,7 +157,7 @@ - org.apache.pulsar:pulsar-client-admin-original + io.streamnative:pulsar-client-admin-original ** @@ -177,7 +168,7 @@ - + org.asynchttpclient org.apache.pulsar.shade.org.asynchttpclient @@ -259,51 +250,75 @@ org.reactivestreams org.apache.pulsar.shade.org.reactivestreams - - io.grpc - org.apache.pulsar.shade.io.grpc - - - okio - org.apache.pulsar.shade.okio - - - com.squareup - org.apache.pulsar.shade.com.squareup - - - io.opencensus - org.apache.pulsar.shade.io.opencensus - - - org.eclipse.jetty - org.apache.pulsar.shade.org.eclipse.jetty - - - org.objenesis - org.apache.pulsar.shade.org.objenesis - - - org.yaml - org.apache.pulsar.shade.org.yaml - - - io.swagger - org.apache.pulsar.shade.io.swagger - - - org.apache.bookkeeper - org.apache.pulsar.shade.org.apache.bookkeeper - + + io.grpc + org.apache.pulsar.shade.io.grpc + + + okio + org.apache.pulsar.shade.okio + + + com.squareup + org.apache.pulsar.shade.com.squareup + + + io.opencensus + 
org.apache.pulsar.shade.io.opencensus + + + org.eclipse.jetty + org.apache.pulsar.shade.org.eclipse.jetty + + + org.objenesis + org.apache.pulsar.shade.org.objenesis + + + org.yaml + org.apache.pulsar.shade.org.yaml + + + io.swagger + org.apache.pulsar.shade.io.swagger + + + org.apache.bookkeeper + org.apache.pulsar.shade.org.apache.bookkeeper + - - + + + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + diff --git a/pulsar-client-admin/pom.xml b/pulsar-client-admin/pom.xml index c811236705971..9ddf04ae917fe 100644 --- a/pulsar-client-admin/pom.xml +++ b/pulsar-client-admin/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-client-admin-original Pulsar Client Admin Original - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-client-admin-api ${project.version} - org.glassfish.jersey.core jersey-client - org.glassfish.jersey.media jersey-media-json-jackson - jakarta.activation jakarta.activation-api - org.glassfish.jersey.media jersey-media-multipart - com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - org.glassfish.jersey.inject jersey-hk2 - javax.xml.bind jaxb-api @@ -91,30 +79,25 @@ javax.activation runtime - com.google.guava guava - com.google.code.gson gson - ${project.groupId} pulsar-package-core ${project.version} - org.hamcrest hamcrest test - @@ -141,7 +124,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java index 0e6296724b3da..7b4ebb1778d8e 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BrokersImpl.java @@ -75,15 +75,15 @@ 
public CompletableFuture getLeaderBrokerAsync() { } @Override - public Map getOwnedNamespaces(String cluster, String brokerUrl) + public Map getOwnedNamespaces(String cluster, String brokerId) throws PulsarAdminException { - return sync(() -> getOwnedNamespacesAsync(cluster, brokerUrl)); + return sync(() -> getOwnedNamespacesAsync(cluster, brokerId)); } @Override public CompletableFuture> getOwnedNamespacesAsync( - String cluster, String brokerUrl) { - WebTarget path = adminBrokers.path(cluster).path(brokerUrl).path("ownedNamespaces"); + String cluster, String brokerId) { + WebTarget path = adminBrokers.path(cluster).path(brokerId).path("ownedNamespaces"); return asyncGetRequest(path, new FutureCallback>(){}); } diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java index 009fa67fbaa29..a9d913c016490 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.admin.internal; +import static com.google.common.base.Preconditions.checkArgument; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -200,18 +201,21 @@ public PulsarAdminBuilder tlsProtocols(Set tlsProtocols) { @Override public PulsarAdminBuilder connectionTimeout(int connectionTimeout, TimeUnit connectionTimeoutUnit) { + checkArgument(connectionTimeout >= 0); this.conf.setConnectionTimeoutMs((int) connectionTimeoutUnit.toMillis(connectionTimeout)); return this; } @Override public PulsarAdminBuilder readTimeout(int readTimeout, TimeUnit readTimeoutUnit) { + checkArgument(readTimeout >= 0); this.conf.setReadTimeoutMs((int) readTimeoutUnit.toMillis(readTimeout)); return this; } @Override public 
PulsarAdminBuilder requestTimeout(int requestTimeout, TimeUnit requestTimeoutUnit) { + checkArgument(requestTimeout >= 0); this.conf.setRequestTimeoutMs((int) requestTimeoutUnit.toMillis(requestTimeout)); return this; } diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java index e0c64319ea2d9..a89da628372bd 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java @@ -965,21 +965,7 @@ public CompletableFuture truncateAsync(String topic) { @Override public CompletableFuture> getMessageByIdAsync(String topic, long ledgerId, long entryId) { - CompletableFuture> future = new CompletableFuture<>(); - getRemoteMessageById(topic, ledgerId, entryId).handle((r, ex) -> { - if (ex != null) { - if (ex instanceof NotFoundException) { - log.warn("Exception '{}' occurred while trying to get message.", ex.getMessage()); - future.complete(r); - } else { - future.completeExceptionally(ex); - } - return null; - } - future.complete(r); - return null; - }); - return future; + return getRemoteMessageById(topic, ledgerId, entryId); } private CompletableFuture> getRemoteMessageById(String topic, long ledgerId, long entryId) { diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java index 2d1dd408ef6c9..046ef3eb370e8 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TransactionsImpl.java @@ -168,6 +168,7 @@ public TransactionPendingAckStats getPendingAckStats(String topic, String subNam @Override public CompletableFuture> 
getSlowTransactionsByCoordinatorIdAsync( Integer coordinatorId, long timeout, TimeUnit timeUnit) { + checkArgument(timeout >= 0); WebTarget path = adminV3Transactions.path("slowTransactions"); path = path.path(timeUnit.toMillis(timeout) + ""); if (coordinatorId != null) { diff --git a/pulsar-client-all/pom.xml b/pulsar-client-all/pom.xml index c893bb6f7e9e3..603dc526df90d 100644 --- a/pulsar-client-all/pom.xml +++ b/pulsar-client-all/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-client-all Pulsar Client All - ${project.groupId} @@ -70,7 +67,6 @@ test - @@ -108,7 +104,6 @@ - maven-antrun-plugin @@ -121,15 +116,12 @@ - + - org.apache.maven.plugins @@ -146,11 +138,10 @@ true true false - - org.apache.pulsar:pulsar-client-original - org.apache.pulsar:pulsar-client-admin-original + io.streamnative:pulsar-client-original + io.streamnative:pulsar-client-admin-original org.apache.commons:commons-lang3 commons-codec:commons-codec commons-collections:commons-collections @@ -176,8 +167,7 @@ commons-*:* io.swagger:* io.airlift:* - - org.apache.pulsar:pulsar-common + io.streamnative:pulsar-common org.apache.bookkeeper:* com.yahoo.datasketches:sketches-core org.glassfish.jersey*:* @@ -205,7 +195,7 @@ org.apache.commons:commons-compress org.tukaani:xz - org.apache.pulsar:pulsar-client-messagecrypto-bc + io.streamnative:pulsar-client-messagecrypto-bc com.fasterxml.jackson.core:jackson-annotations @@ -213,7 +203,7 @@ - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original ** @@ -315,24 +305,24 @@ org.apache.pulsar.shade.org.glassfish - io.grpc - org.apache.pulsar.shade.io.grpc + io.grpc + org.apache.pulsar.shade.io.grpc - okio - org.apache.pulsar.shade.okio + okio + org.apache.pulsar.shade.okio - com.squareup - org.apache.pulsar.shade.com.squareup + com.squareup + org.apache.pulsar.shade.com.squareup - io.opencensus - org.apache.pulsar.shade.io.opencensus + io.opencensus + 
org.apache.pulsar.shade.io.opencensus - org.eclipse.jetty - org.apache.pulsar.shade.org.eclipse.jetty + org.eclipse.jetty + org.apache.pulsar.shade.org.eclipse.jetty org.objenesis @@ -389,14 +379,13 @@ - - + + - - - 4.0.0 - - - org.apache.pulsar - pulsar - 3.1.0-SNAPSHOT - .. - - - pulsar-client-api - Pulsar Client :: API - - - - - - com.google.protobuf - protobuf-java - provided - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - ${pulsar.client.compiler.release} - - - - org.gaul - modernizer-maven-plugin - - true - 8 - - - - modernizer - verify - - modernizer - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - + + com.google.protobuf + protobuf-java + provided + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + ${pulsar.client.compiler.release} + + + + org.gaul + modernizer-maven-plugin + + true + 8 + + + + modernizer + verify + + modernizer + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java index 870900a48feae..588a34d2069da 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java @@ -126,7 +126,7 @@ public interface ConsumerBuilder extends Cloneable { ConsumerBuilder topics(List topicNames); /** - * Specify a pattern for topics that this consumer subscribes to. 
+ * Specify a pattern for topics(not contains the partition suffix) that this consumer subscribes to. * *

The pattern is applied to subscribe to all topics, within a single namespace, that match the * pattern. @@ -134,13 +134,13 @@ public interface ConsumerBuilder extends Cloneable { *

The consumer automatically subscribes to topics created after itself. * * @param topicsPattern - * a regular expression to select a list of topics to subscribe to + * a regular expression to select a list of topics(not contains the partition suffix) to subscribe to * @return the consumer builder instance */ ConsumerBuilder topicsPattern(Pattern topicsPattern); /** - * Specify a pattern for topics that this consumer subscribes to. + * Specify a pattern for topics(not contains the partition suffix) that this consumer subscribes to. * *

It accepts a regular expression that is compiled into a pattern internally. E.g., * "persistent://public/default/pattern-topic-.*" @@ -151,7 +151,7 @@ public interface ConsumerBuilder extends Cloneable { *

The consumer automatically subscribes to topics created after itself. * * @param topicsPattern - * given regular expression for topics pattern + * given regular expression for topics(not contains the partition suffix) pattern * @return the consumer builder instance */ ConsumerBuilder topicsPattern(String topicsPattern); diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java index 9409eefe2e0f0..22a97571e532e 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/PulsarClientException.java @@ -344,6 +344,22 @@ public TopicDoesNotExistException(String msg) { } } + /** + * Not found subscription that cannot be created. + */ + public static class SubscriptionNotFoundException extends PulsarClientException { + /** + * Constructs an {@code SubscriptionNotFoundException} with the specified detail message. + * + * @param msg + * The detail message (which is saved for later retrieval + * by the {@link #getMessage()} method) + */ + public SubscriptionNotFoundException(String msg) { + super(msg); + } + } + /** * Lookup exception thrown by Pulsar client. 
*/ @@ -1159,6 +1175,7 @@ public static boolean isRetriableError(Throwable t) { || t instanceof NotFoundException || t instanceof IncompatibleSchemaException || t instanceof TopicDoesNotExistException + || t instanceof SubscriptionNotFoundException || t instanceof UnsupportedAuthenticationException || t instanceof InvalidMessageException || t instanceof InvalidTopicNameException diff --git a/pulsar-client-api/src/main/resources/findbugsExclude.xml b/pulsar-client-api/src/main/resources/findbugsExclude.xml index 9d73ac29a7bdb..353f01a7bb285 100644 --- a/pulsar-client-api/src/main/resources/findbugsExclude.xml +++ b/pulsar-client-api/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-client-auth-athenz/pom.xml b/pulsar-client-auth-athenz/pom.xml index 3cfcc8be49c98..e676e6347524c 100644 --- a/pulsar-client-auth-athenz/pom.xml +++ b/pulsar-client-auth-athenz/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- pulsar-client-auth-athenz jar Athenz authentication plugin for java client - - ${project.groupId} pulsar-client-original ${project.parent.version} true - com.yahoo.athenz athenz-zts-java-client-core - com.yahoo.athenz athenz-cert-refresher - org.bouncycastle bcpkix-jdk18on - com.google.guava guava - org.apache.commons commons-lang3 - @@ -77,7 +67,6 @@ ${pulsar.client.compiler.release} - org.apache.maven.plugins maven-enforcer-plugin @@ -108,7 +97,6 @@ - org.gaul modernizer-maven-plugin @@ -126,7 +114,6 @@ - com.github.spotbugs spotbugs-maven-plugin @@ -144,7 +131,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml b/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml +++ b/pulsar-client-auth-athenz/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-client-auth-sasl jar SASL authentication plugin for java client - - ${project.groupId} pulsar-client-original ${project.parent.version} true - com.google.guava guava - org.apache.commons commons-lang3 - org.projectlombok lombok - javax.ws.rs javax.ws.rs-api - org.glassfish.jersey.core jersey-client - - @@ -78,7 +67,6 @@ ${pulsar.client.compiler.release} - org.apache.maven.plugins maven-enforcer-plugin @@ -109,7 +97,6 @@ - org.gaul modernizer-maven-plugin @@ -127,7 +114,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-client-messagecrypto-bc/pom.xml b/pulsar-client-messagecrypto-bc/pom.xml index 4e2604f33169a..f95f3b9d6b011 100644 --- a/pulsar-client-messagecrypto-bc/pom.xml +++ b/pulsar-client-messagecrypto-bc/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- pulsar-client-messagecrypto-bc jar Message crypto for End to End encryption with BouncyCastleProvider - ${project.groupId} @@ -40,7 +37,6 @@ ${project.parent.version} provided - ${project.groupId} bouncy-castle-bc @@ -48,9 +44,7 @@ pkg true - - diff --git a/pulsar-client-shaded/pom.xml b/pulsar-client-shaded/pom.xml index 242fc42b2954f..a046f20c98ac8 100644 --- a/pulsar-client-shaded/pom.xml +++ b/pulsar-client-shaded/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-client Pulsar Client Java - ${project.groupId} @@ -49,11 +46,8 @@ ${project.version} - - - org.apache.maven.plugins maven-dependency-plugin @@ -89,7 +83,6 @@ - maven-antrun-plugin @@ -102,15 +95,12 @@ - + - org.apache.maven.plugins @@ -125,10 +115,9 @@ true true false - - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original org.apache.bookkeeper:* org.apache.commons:commons-lang3 commons-codec:commons-codec @@ -154,19 +143,17 @@ commons-*:* io.swagger:* io.airlift:* - - org.apache.pulsar:pulsar-common + io.streamnative:pulsar-common com.yahoo.datasketches:sketches-core org.objenesis:* org.yaml:snakeyaml - org.apache.avro:* com.thoughtworks.paranamer:paranamer org.apache.commons:commons-compress org.tukaani:xz - org.apache.pulsar:pulsar-client-messagecrypto-bc + io.streamnative:pulsar-client-messagecrypto-bc com.fasterxml.jackson.core:jackson-annotations @@ -174,7 +161,7 @@ - org.apache.pulsar:pulsar-client-original + io.streamnative:pulsar-client-original ** @@ -302,14 +289,13 @@ - - + + - com.github.spotbugs spotbugs-maven-plugin @@ -323,7 +309,6 @@ - - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- pulsar-client-tools-api Pulsar Client Tools API Pulsar Client Tools API - ${project.groupId} @@ -66,7 +64,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -82,5 +79,4 @@ - diff --git a/pulsar-client-tools-customcommand-example/pom.xml b/pulsar-client-tools-customcommand-example/pom.xml index a3a3de19202c2..5789d8f37bb0e 100644 --- a/pulsar-client-tools-customcommand-example/pom.xml +++ b/pulsar-client-tools-customcommand-example/pom.xml @@ -1,3 +1,4 @@ + - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. @@ -31,7 +32,7 @@ Pulsar CLI Custom command example - org.apache.pulsar + io.streamnative pulsar-client-tools-api ${project.version} provided diff --git a/pulsar-client-tools-test/pom.xml b/pulsar-client-tools-test/pom.xml index 74278374da518..b8819a0d3063d 100644 --- a/pulsar-client-tools-test/pom.xml +++ b/pulsar-client-tools-test/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-client-tools-test Pulsar Client Tools Test Pulsar Client Tools Test - ${project.groupId} @@ -89,7 +87,6 @@ - org.apache.maven.plugins maven-deploy-plugin @@ -126,8 +123,8 @@ copy filters - - + + diff --git a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java index 8b32ad906eac4..30d6a342f18d1 100644 --- a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java +++ b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java @@ -120,6 +120,7 @@ public void testNonDurableSubscribe() throws Exception { properties.setProperty("useTls", "false"); final String topicName = getTopicWithRandomSuffix("non-durable"); + admin.topics().createNonPartitionedTopic(topicName); int numberOfMessages = 10; @Cleanup("shutdownNow") @@ -211,6 +212,7 @@ public void testRead() throws Exception { 
properties.setProperty("useTls", "false"); final String topicName = getTopicWithRandomSuffix("reader"); + admin.topics().createNonPartitionedTopic(topicName); int numberOfMessages = 10; @Cleanup("shutdownNow") @@ -260,6 +262,7 @@ public void testEncryption() throws Exception { properties.setProperty("useTls", "false"); final String topicName = getTopicWithRandomSuffix("encryption"); + admin.topics().createNonPartitionedTopic(topicName); final String keyUriBase = "file:../pulsar-broker/src/test/resources/certificate/"; final int numberOfMessages = 10; diff --git a/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml b/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml +++ b/pulsar-client-tools-test/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-client-tools Pulsar Client Tools Pulsar Client Tools - com.beust @@ -109,7 +107,6 @@ swagger-core provided - ${project.groupId} @@ -117,9 +114,8 @@ ${project.version} test - - org.apache.pulsar + io.streamnative pulsar-io-batch-discovery-triggerers ${project.version} test @@ -137,9 +133,7 @@ org.apache.commons commons-text - - @@ -159,7 +153,6 @@ - @@ -179,7 +172,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -195,5 +187,4 @@ - diff --git a/pulsar-client/pom.xml b/pulsar-client/pom.xml index 0e45e73636612..738fbd5898e55 100644 --- a/pulsar-client/pom.xml +++ b/pulsar-client/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- pulsar-client-original Pulsar Client Java - ${project.groupId} pulsar-client-api ${project.parent.version} - ${project.groupId} pulsar-common ${project.parent.version} - ${project.groupId} bouncy-castle-bc ${project.parent.version} pkg - ${project.groupId} @@ -59,7 +53,6 @@ ${project.parent.version} true - io.netty netty-codec-http @@ -72,13 +65,11 @@ io.netty netty-codec-socks - io.swagger swagger-annotations provided - io.netty netty-resolver-dns @@ -93,50 +84,40 @@ netty-resolver-dns-native-macos osx-x86_64 - org.apache.commons commons-lang3 - org.asynchttpclient async-http-client - com.typesafe.netty netty-reactive-streams - org.slf4j slf4j-api - commons-codec commons-codec - com.yahoo.datasketches sketches-core - com.google.code.gson gson - - org.apache.avro avro ${avro.version} - org.apache.avro avro-protobuf @@ -148,36 +129,30 @@ - joda-time joda-time provided - com.google.protobuf protobuf-java provided - com.fasterxml.jackson.module jackson-module-jsonSchema - net.jcip jcip-annotations - com.github.spotbugs spotbugs-annotations provided true - ${project.groupId} @@ -185,22 +160,18 @@ ${project.parent.version} test - org.skyscreamer jsonassert ${skyscreamer.version} test - org.awaitility awaitility test - - @@ -234,7 +205,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -248,7 +218,6 @@ - org.xolstice.maven.plugins protobuf-maven-plugin diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java index e81365d3886cc..8c17d8fcb253c 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AbstractBatchMessageContainer.java @@ -53,6 +53,7 @@ public abstract class AbstractBatchMessageContainer implements BatchMessageConta // allocate a new buffer that can hold the entire batch without needing costly 
reallocations protected int maxBatchSize = INITIAL_BATCH_BUFFER_SIZE; protected int maxMessagesNum = INITIAL_MESSAGES_NUM; + private volatile long firstAddedTimestamp = 0L; @Override public boolean haveEnoughSpace(MessageImpl msg) { @@ -127,4 +128,19 @@ public boolean hasSameTxn(MessageImpl msg) { return currentTxnidMostBits == msg.getMessageBuilder().getTxnidMostBits() && currentTxnidLeastBits == msg.getMessageBuilder().getTxnidLeastBits(); } + + @Override + public long getFirstAddedTimestamp() { + return firstAddedTimestamp; + } + + protected void tryUpdateTimestamp() { + if (numMessagesInBatch == 1) { + firstAddedTimestamp = System.nanoTime(); + } + } + + protected void clearTimestamp() { + firstAddedTimestamp = 0L; + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java index 68b781e67d29c..a1017e66760a5 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/AutoClusterFailover.java @@ -329,6 +329,7 @@ public AutoClusterFailoverBuilder switchBackDelay(long switchBackDelay, TimeUnit @Override public AutoClusterFailoverBuilder checkInterval(long interval, TimeUnit timeUnit) { + checkArgument(interval >= 0L, "check interval time must not be negative."); this.checkIntervalMs = timeUnit.toMillis(interval); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java index 8fb4e9f2ce543..ddbe1bc255779 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerBase.java @@ -82,4 +82,11 @@ public interface BatchMessageContainerBase extends BatchMessageContainer { * 
@return belong to the same txn or not */ boolean hasSameTxn(MessageImpl msg); + + /** + * Get the timestamp in nanoseconds when the 1st message is added into the batch container. + * + * @return the timestamp in nanoseconds or 0L if the batch container is empty + */ + long getFirstAddedTimestamp(); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java index 9be7210a38742..bf8c1f9de8201 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java @@ -127,6 +127,7 @@ public boolean add(MessageImpl msg, SendCallback callback) { previousCallback = callback; currentBatchSizeBytes += msg.getDataBuffer().readableBytes(); messages.add(msg); + tryUpdateTimestamp(); if (lowestSequenceId == -1L) { lowestSequenceId = msg.getSequenceId(); @@ -203,6 +204,7 @@ void updateMaxBatchSize(int uncompressedSize) { @Override public void clear() { + clearTimestamp(); messages = new ArrayList<>(maxMessagesNum); firstCallback = null; previousCallback = null; @@ -324,9 +326,11 @@ public OpSendMsg createOpSendMsg() throws IOException { protected void updateAndReserveBatchAllocatedSize(int updatedSizeBytes) { int delta = updatedSizeBytes - batchAllocatedSizeBytes; batchAllocatedSizeBytes = updatedSizeBytes; - if (delta != 0) { - if (producer != null) { + if (producer != null) { + if (delta > 0) { producer.client.getMemoryLimitController().forceReserveMemory(delta); + } else if (delta < 0) { + producer.client.getMemoryLimitController().releaseMemory(-delta); } } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java index e2ce9e2d0bd70..1592d3cae6cb5 100644 --- 
a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java @@ -57,11 +57,13 @@ public boolean add(MessageImpl msg, SendCallback callback) { numMessagesInBatch++; currentBatchSizeBytes += msg.getDataBuffer().readableBytes(); } + tryUpdateTimestamp(); return isBatchFull(); } @Override public void clear() { + clearTimestamp(); numMessagesInBatch = 0; currentBatchSizeBytes = 0; batches.clear(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java index d5ce9213211dd..8ceb8e44975c8 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java @@ -26,10 +26,12 @@ import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import org.apache.commons.lang3.mutable.MutableObject; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SchemaSerializationException; @@ -56,6 +58,12 @@ public class BinaryProtoLookupService implements LookupService { private final String listenerName; private final int maxLookupRedirects; + private final ConcurrentHashMap>> + lookupInProgress = new ConcurrentHashMap<>(); + + private final ConcurrentHashMap> + partitionedMetadataInProgress = new ConcurrentHashMap<>(); + public BinaryProtoLookupService(PulsarClientImpl client, String serviceUrl, boolean useTls, @@ -92,7 +100,21 @@ public void 
updateServiceUrl(String serviceUrl) throws PulsarClientException { * @return broker-socket-address that serves given topic */ public CompletableFuture> getBroker(TopicName topicName) { - return findBroker(serviceNameResolver.resolveHost(), false, topicName, 0); + final MutableObject newFutureCreated = new MutableObject<>(); + try { + return lookupInProgress.computeIfAbsent(topicName, tpName -> { + CompletableFuture> newFuture = + findBroker(serviceNameResolver.resolveHost(), false, topicName, 0); + newFutureCreated.setValue(newFuture); + return newFuture; + }); + } finally { + if (newFutureCreated.getValue() != null) { + newFutureCreated.getValue().whenComplete((v, ex) -> { + lookupInProgress.remove(topicName, newFutureCreated.getValue()); + }); + } + } } /** @@ -100,7 +122,21 @@ public CompletableFuture> getBroker(T * */ public CompletableFuture getPartitionedTopicMetadata(TopicName topicName) { - return getPartitionedTopicMetadata(serviceNameResolver.resolveHost(), topicName); + final MutableObject newFutureCreated = new MutableObject<>(); + try { + return partitionedMetadataInProgress.computeIfAbsent(topicName, tpName -> { + CompletableFuture newFuture = + getPartitionedTopicMetadata(serviceNameResolver.resolveHost(), topicName); + newFutureCreated.setValue(newFuture); + return newFuture; + }); + } finally { + if (newFutureCreated.getValue() != null) { + newFutureCreated.getValue().whenComplete((v, ex) -> { + partitionedMetadataInProgress.remove(topicName, newFutureCreated.getValue()); + }); + } + } } private CompletableFuture> findBroker(InetSocketAddress socketAddress, diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java index 7677045f0899b..107f15905044c 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientBuilderImpl.java @@ 
-166,6 +166,7 @@ public ClientBuilder operationTimeout(int operationTimeout, TimeUnit unit) { @Override public ClientBuilder lookupTimeout(int lookupTimeout, TimeUnit unit) { + checkArgument(lookupTimeout >= 0, "lookupTimeout must not be negative"); conf.setLookupTimeoutMs(unit.toMillis(lookupTimeout)); return this; } @@ -331,6 +332,7 @@ public ClientBuilder keepAliveInterval(int keepAliveInterval, TimeUnit unit) { @Override public ClientBuilder connectionTimeout(int duration, TimeUnit unit) { + checkArgument(duration >= 0, "connectionTimeout needs to be >= 0"); conf.setConnectionTimeoutMs((int) unit.toMillis(duration)); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java index 115c71307c4f2..e1b374acbbb54 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java @@ -1291,6 +1291,8 @@ public static PulsarClientException getPulsarClientException(ServerError error, return new PulsarClientException.IncompatibleSchemaException(errorMsg); case TopicNotFound: return new PulsarClientException.TopicDoesNotExistException(errorMsg); + case SubscriptionNotFound: + return new PulsarClientException.SubscriptionNotFoundException(errorMsg); case ConsumerAssignError: return new PulsarClientException.ConsumerAssignException(errorMsg); case NotAllowedError: diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java index 263507dac1dc6..fc7c89c3ce693 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java @@ -43,6 +43,7 @@ public class ConnectionHandler { private volatile long epoch = -1L; protected volatile long 
lastConnectionClosedTimestamp = 0L; private final AtomicBoolean duringConnect = new AtomicBoolean(false); + protected final int randomKeyForSelectConnection; interface Connection { @@ -58,6 +59,7 @@ default void connectionFailed(PulsarClientException e) { protected ConnectionHandler(HandlerState state, Backoff backoff, Connection connection) { this.state = state; + this.randomKeyForSelectConnection = state.client.getCnxPool().genRandomKeyToSelectCon(); this.connection = connection; this.backoff = backoff; CLIENT_CNX_UPDATER.set(this, null); @@ -88,11 +90,11 @@ protected void grabCnx() { if (state.redirectedClusterURI != null) { InetSocketAddress address = InetSocketAddress.createUnresolved(state.redirectedClusterURI.getHost(), state.redirectedClusterURI.getPort()); - cnxFuture = state.client.getConnection(address, address); + cnxFuture = state.client.getConnection(address, address, randomKeyForSelectConnection); } else if (state.topic == null) { cnxFuture = state.client.getConnectionToServiceUrl(); } else { - cnxFuture = state.client.getConnection(state.topic); // + cnxFuture = state.client.getConnection(state.topic, randomKeyForSelectConnection); } cnxFuture.thenCompose(cnx -> connection.connectionOpened(cnx)) .thenAccept(__ -> duringConnect.set(false)) diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java index 1420d81c688ee..ef3a9249c820a 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java @@ -165,8 +165,18 @@ private static AddressResolver createAddressResolver(ClientCo private static final Random random = new Random(); + public int genRandomKeyToSelectCon() { + if (maxConnectionsPerHosts == 0) { + return -1; + } + return signSafeMod(random.nextInt(), maxConnectionsPerHosts); + } + public CompletableFuture getConnection(final 
InetSocketAddress address) { - return getConnection(address, address); + if (maxConnectionsPerHosts == 0) { + return getConnection(address, address, -1); + } + return getConnection(address, address, signSafeMod(random.nextInt(), maxConnectionsPerHosts)); } void closeAllConnections() { @@ -204,14 +214,12 @@ void closeAllConnections() { * @return a future that will produce the ClientCnx object */ public CompletableFuture getConnection(InetSocketAddress logicalAddress, - InetSocketAddress physicalAddress) { + InetSocketAddress physicalAddress, final int randomKey) { if (maxConnectionsPerHosts == 0) { // Disable pooling return createConnection(logicalAddress, physicalAddress, -1); } - final int randomKey = signSafeMod(random.nextInt(), maxConnectionsPerHosts); - final ConcurrentMap> innerPool = pool.computeIfAbsent(logicalAddress, a -> new ConcurrentHashMap<>()); CompletableFuture completableFuture = innerPool diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java index fec428824c205..79cd8db4b99bb 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java @@ -64,6 +64,7 @@ import org.apache.pulsar.common.api.proto.CommandSubscribe; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.collections.BitSetRecyclable; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.common.util.collections.GrowableArrayBlockingQueue; import org.slf4j.Logger; @@ -448,6 +449,7 @@ public void acknowledge(Messages messages) throws PulsarClientException { @Override public void reconsumeLater(Message message, long delayTime, TimeUnit unit) throws PulsarClientException { + checkArgument(delayTime >= 0, "The delay time must not be 
negative."); reconsumeLater(message, null, delayTime, unit); } @@ -562,6 +564,7 @@ public CompletableFuture reconsumeLaterAsync(Message message, long dela @Override public CompletableFuture reconsumeLaterAsync( Message message, Map customProperties, long delayTime, TimeUnit unit) { + checkArgument(delayTime >= 0, "The delay time must not be negative."); if (!conf.isRetryEnable()) { return FutureUtil.failedFuture(new PulsarClientException(RECONSUME_LATER_ERROR_MSG)); } @@ -598,12 +601,14 @@ public CompletableFuture acknowledgeCumulativeAsync(Message message) { @Override public CompletableFuture reconsumeLaterCumulativeAsync(Message message, long delayTime, TimeUnit unit) { + checkArgument(delayTime >= 0, "The delay time must not be negative."); return reconsumeLaterCumulativeAsync(message, null, delayTime, unit); } @Override public CompletableFuture reconsumeLaterCumulativeAsync( Message message, Map customProperties, long delayTime, TimeUnit unit) { + checkArgument(delayTime >= 0, "The delay time must not be negative."); if (!conf.isRetryEnable()) { return FutureUtil.failedFuture(new PulsarClientException(RECONSUME_LATER_ERROR_MSG)); } @@ -1266,6 +1271,10 @@ protected boolean isValidConsumerEpoch(MessageImpl message) { return true; } + protected boolean isSingleMessageAcked(BitSetRecyclable ackBitSet, int batchIndex) { + return ackBitSet != null && !ackBitSet.get(batchIndex); + } + public boolean hasBatchReceiveTimeout() { return batchReceiveTimeout != null; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java index f644c6a18398f..895273a90c050 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java @@ -497,6 +497,7 @@ public ConsumerBuilder enableBatchIndexAcknowledgment(boolean batchIndexAckno @Override 
public ConsumerBuilder expireTimeOfIncompleteChunkedMessage(long duration, TimeUnit unit) { + checkArgument(duration >= 0, "expired time of incomplete chunk message must not be negative"); conf.setExpireTimeOfIncompleteChunkedMessageMillis(unit.toMillis(duration)); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java index a929fe9aa6bb2..fa5a8bf3c5aa4 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java @@ -21,6 +21,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.pulsar.common.protocol.Commands.DEFAULT_CONSUMER_EPOCH; import static org.apache.pulsar.common.protocol.Commands.hasChecksum; +import static org.apache.pulsar.common.protocol.Commands.serializeWithSize; import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ComparisonChain; @@ -32,9 +33,11 @@ import io.netty.util.Recycler.Handle; import io.netty.util.ReferenceCountUtil; import io.netty.util.Timeout; +import io.netty.util.concurrent.FastThreadLocal; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.BitSet; import java.util.Collections; import java.util.HashMap; @@ -66,6 +69,7 @@ import lombok.AccessLevel; import lombok.Getter; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Triple; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.ConsumerCryptoFailureAction; import org.apache.pulsar.client.api.DeadLetterPolicy; @@ -93,7 +97,9 @@ import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.EncryptionContext; import 
org.apache.pulsar.common.api.EncryptionContext.EncryptionKey; +import org.apache.pulsar.common.api.proto.BaseCommand; import org.apache.pulsar.common.api.proto.BrokerEntryMetadata; +import org.apache.pulsar.common.api.proto.CommandAck; import org.apache.pulsar.common.api.proto.CommandAck.AckType; import org.apache.pulsar.common.api.proto.CommandAck.ValidationError; import org.apache.pulsar.common.api.proto.CommandMessage; @@ -116,6 +122,7 @@ import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.SafeCollectionUtils; import org.apache.pulsar.common.util.collections.BitSetRecyclable; +import org.apache.pulsar.common.util.collections.ConcurrentBitSetRecyclable; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.common.util.collections.GrowableArrayBlockingQueue; import org.slf4j.Logger; @@ -420,7 +427,7 @@ public CompletableFuture unsubscribeAsync() { unsubscribeFuture.completeExceptionally( PulsarClientException.wrap(e.getCause(), String.format("Failed to unsubscribe the subscription %s of topic %s", - topicName.toString(), subscription))); + subscription, topicName.toString()))); return null; }); } else { @@ -613,6 +620,7 @@ protected CompletableFuture doReconsumeLater(Message message, AckType a retryLetterProducer = client.newProducer(Schema.AUTO_PRODUCE_BYTES(schema)) .topic(this.deadLetterPolicy.getRetryLetterTopic()) .enableBatching(false) + .enableChunking(true) .blockIfQueueFull(false) .create(); } @@ -1181,7 +1189,7 @@ protected MessageImpl newSingleMessage(final int index, return null; } - if (ackBitSet != null && !ackBitSet.get(index)) { + if (isSingleMessageAcked(ackBitSet, index)) { return null; } @@ -1419,7 +1427,9 @@ void messageReceived(CommandMessage cmdMessage, ByteBuf headersAndPayload, Clien private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata msgMetadata, MessageIdImpl msgId, MessageIdData messageId, ClientCnx cnx) { - + if 
(msgMetadata.getChunkId() != (msgMetadata.getNumChunksFromMsg() - 1)) { + increaseAvailablePermits(cnx); + } // Lazy task scheduling to expire incomplete chunk message if (expireTimeOfIncompleteChunkedMessageMillis > 0 && expireChunkMessageTaskScheduled.compareAndSet(false, true)) { @@ -1433,7 +1443,48 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m ChunkedMessageCtx chunkedMsgCtx = chunkedMessagesMap.get(msgMetadata.getUuid()); - if (msgMetadata.getChunkId() == 0 && chunkedMsgCtx == null) { + if (msgMetadata.getChunkId() == 0) { + if (chunkedMsgCtx != null) { + // Handle ack hole case when receive duplicated chunks. + // There are two situation that receives chunks with the same sequence ID and chunk ID. + // Situation 1 - Message redeliver: + // For example: + // Chunk-1 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-2 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-3 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-4 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-5 sequence ID: 0, chunk ID: 2, msgID: 1:3 + // In this case, chunk-3 and chunk-4 have the same msgID with chunk-1 and chunk-2. + // This may be caused by message redeliver, we can't ack any chunk in this case here. + // Situation 2 - Corrupted chunk message + // For example: + // Chunk-1 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-2 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-3 sequence ID: 0, chunk ID: 0, msgID: 1:3 + // Chunk-4 sequence ID: 0, chunk ID: 1, msgID: 1:4 + // Chunk-5 sequence ID: 0, chunk ID: 2, msgID: 1:5 + // In this case, all the chunks with different msgIDs and are persistent in the topic. + // But Chunk-1 and Chunk-2 belong to a corrupted chunk message that must be skipped since + // they will not be delivered to end users. So we should ack them here to avoid ack hole. 
+ boolean isCorruptedChunkMessageDetected = Arrays.stream(chunkedMsgCtx.chunkedMessageIds) + .noneMatch(messageId1 -> messageId1 != null && messageId1.ledgerId == messageId.getLedgerId() + && messageId1.entryId == messageId.getEntryId()); + if (isCorruptedChunkMessageDetected) { + Arrays.stream(chunkedMsgCtx.chunkedMessageIds).forEach(messageId1 -> { + if (messageId1 != null) { + doAcknowledge(messageId1, AckType.Individual, Collections.emptyMap(), null); + } + }); + } + // The first chunk of a new chunked-message received before receiving other chunks of previous + // chunked-message + // so, remove previous chunked-message from map and release buffer + if (chunkedMsgCtx.chunkedMsgBuffer != null) { + ReferenceCountUtil.safeRelease(chunkedMsgCtx.chunkedMsgBuffer); + } + chunkedMsgCtx.recycle(); + chunkedMessagesMap.remove(msgMetadata.getUuid()); + } pendingChunkedMessageCount++; if (maxPendingChunkedMessage > 0 && pendingChunkedMessageCount > maxPendingChunkedMessage) { removeOldestPendingChunkedMessage(); @@ -1449,8 +1500,34 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m // discard message if chunk is out-of-order if (chunkedMsgCtx == null || chunkedMsgCtx.chunkedMsgBuffer == null || msgMetadata.getChunkId() != (chunkedMsgCtx.lastChunkedMessageId + 1)) { + // Filter and ack duplicated chunks instead of discard ctx. + // For example: + // Chunk-1 sequence ID: 0, chunk ID: 0, msgID: 1:1 + // Chunk-2 sequence ID: 0, chunk ID: 1, msgID: 1:2 + // Chunk-3 sequence ID: 0, chunk ID: 2, msgID: 1:3 + // Chunk-4 sequence ID: 0, chunk ID: 1, msgID: 1:4 + // Chunk-5 sequence ID: 0, chunk ID: 2, msgID: 1:5 + // Chunk-6 sequence ID: 0, chunk ID: 3, msgID: 1:6 + // We should filter and ack chunk-4 and chunk-5. 
+ if (chunkedMsgCtx != null && msgMetadata.getChunkId() <= chunkedMsgCtx.lastChunkedMessageId) { + log.warn("[{}] Receive a duplicated chunk message with messageId [{}], last-chunk-Id [{}], " + + "chunkId [{}], sequenceId [{}]", + msgMetadata.getProducerName(), msgId, chunkedMsgCtx.lastChunkedMessageId, + msgMetadata.getChunkId(), msgMetadata.getSequenceId()); + compressedPayload.release(); + // Just like the above logic of receiving the first chunk again. We only ack this chunk in the message + // duplication case. + boolean isDuplicatedChunk = Arrays.stream(chunkedMsgCtx.chunkedMessageIds) + .noneMatch(messageId1 -> messageId1 != null && messageId1.ledgerId == messageId.getLedgerId() + && messageId1.entryId == messageId.getEntryId()); + if (isDuplicatedChunk) { + doAcknowledge(msgId, AckType.Individual, Collections.emptyMap(), null); + } + return null; + } // means we lost the first chunk: should never happen - log.info("Received unexpected chunk messageId {}, last-chunk-id{}, chunkId = {}", msgId, + log.info("[{}] [{}] Received unexpected chunk messageId {}, last-chunk-id = {}, chunkId = {}", topic, + subscription, msgId, (chunkedMsgCtx != null ? 
chunkedMsgCtx.lastChunkedMessageId : null), msgMetadata.getChunkId()); if (chunkedMsgCtx != null) { if (chunkedMsgCtx.chunkedMsgBuffer != null) { @@ -1460,7 +1537,6 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m } chunkedMessagesMap.remove(msgMetadata.getUuid()); compressedPayload.release(); - increaseAvailablePermits(cnx); if (expireTimeOfIncompleteChunkedMessageMillis > 0 && System.currentTimeMillis() > (msgMetadata.getPublishTime() + expireTimeOfIncompleteChunkedMessageMillis)) { @@ -1479,7 +1555,6 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m // if final chunk is not received yet then release payload and return if (msgMetadata.getChunkId() != (msgMetadata.getNumChunksFromMsg() - 1)) { compressedPayload.release(); - increaseAvailablePermits(cnx); return null; } @@ -1564,7 +1639,14 @@ void receiveIndividualMessagesFromBatch(BrokerEntryMetadata brokerEntryMetadata, singleMessageMetadata, uncompressedPayload, batchMessage, schema, true, ackBitSet, ackSetInMessageId, redeliveryCount, consumerEpoch); if (message == null) { - skippedMessages++; + // If it is not in ackBitSet, it means Broker does not want to deliver it to the client, and + // did not decrease the permits in the broker-side. + // So do not acquire more permits for this message. + // Why not skip this single message in the first line of for-loop block? We need call + // "newSingleMessage" to move "payload.readerIndex" to a correct value to get the correct data. 
+ if (!isSingleMessageAcked(ackBitSet, i)) { + skippedMessages++; + } continue; } if (possibleToDeadLetter != null) { @@ -2094,6 +2176,8 @@ private void initDeadLetterProducerIfNeeded() { .initialSubscriptionName(this.deadLetterPolicy.getInitialSubscriptionName()) .topic(this.deadLetterPolicy.getDeadLetterTopic()) .blockIfQueueFull(false) + .enableBatching(false) + .enableChunking(true) .createAsync(); } } finally { @@ -2454,9 +2538,9 @@ private void internalGetLastMessageIdAsync(final Backoff backoff, return; } + log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms", + topic, getHandlerName(), nextDelay); ((ScheduledExecutorService) client.getScheduledExecutorProvider().getExecutor()).schedule(() -> { - log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms", - topic, getHandlerName(), nextDelay); remainingTime.addAndGet(-nextDelay); internalGetLastMessageIdAsync(backoff, remainingTime, future); }, nextDelay, TimeUnit.MILLISECONDS); @@ -2710,7 +2794,7 @@ private CompletableFuture doTransactionAcknowledgeForResponse(MessageId me final MessageIdAdv messageIdAdv = (MessageIdAdv) messageId; final long ledgerId = messageIdAdv.getLedgerId(); final long entryId = messageIdAdv.getEntryId(); - final ByteBuf cmd; + final List cmdList; if (MessageIdAdvUtils.isBatch(messageIdAdv)) { BitSetRecyclable bitSetRecyclable = BitSetRecyclable.create(); bitSetRecyclable.set(0, messageIdAdv.getBatchSize()); @@ -2720,12 +2804,37 @@ private CompletableFuture doTransactionAcknowledgeForResponse(MessageId me } else { bitSetRecyclable.clear(messageIdAdv.getBatchIndex()); } - cmd = Commands.newAck(consumerId, ledgerId, entryId, bitSetRecyclable, ackType, validationError, properties, - txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId, messageIdAdv.getBatchSize()); + cmdList = Collections.singletonList(Commands.newAck(consumerId, ledgerId, entryId, bitSetRecyclable, + ackType, validationError, 
properties, txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId, + messageIdAdv.getBatchSize())); bitSetRecyclable.recycle(); } else { - cmd = Commands.newAck(consumerId, ledgerId, entryId, null, ackType, validationError, properties, - txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId); + MessageIdImpl[] chunkMsgIds = this.unAckedChunkedMessageIdSequenceMap.remove(messageIdAdv); + // cumulative ack chunk by the last messageId + if (chunkMsgIds == null || ackType == AckType.Cumulative) { + cmdList = Collections.singletonList(Commands.newAck(consumerId, ledgerId, entryId, null, ackType, + validationError, properties, txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId)); + } else { + if (Commands.peerSupportsMultiMessageAcknowledgment( + getClientCnx().getRemoteEndpointProtocolVersion())) { + List> entriesToAck = + new ArrayList<>(chunkMsgIds.length); + for (MessageIdImpl cMsgId : chunkMsgIds) { + if (cMsgId != null && chunkMsgIds.length > 1) { + entriesToAck.add(Triple.of(cMsgId.getLedgerId(), cMsgId.getEntryId(), null)); + } + } + cmdList = Collections.singletonList( + newMultiTransactionMessageAck(consumerId, txnID, entriesToAck, requestId)); + } else { + cmdList = new ArrayList<>(); + for (MessageIdImpl cMsgId : chunkMsgIds) { + cmdList.add(Commands.newAck(consumerId, cMsgId.ledgerId, cMsgId.entryId, null, ackType, + validationError, properties, + txnID.getLeastSigBits(), txnID.getMostSigBits(), requestId)); + } + } + } } if (ackType == AckType.Cumulative) { @@ -2739,8 +2848,55 @@ private CompletableFuture doTransactionAcknowledgeForResponse(MessageId me .ConnectException("Failed to ack message [" + messageId + "] " + "for transaction [" + txnID + "] due to consumer connect fail, consumer state: " + getState())); } else { - return cnx.newAckForReceipt(cmd, requestId); + List> completableFutures = new LinkedList<>(); + cmdList.forEach(cmd -> completableFutures.add(cnx.newAckForReceipt(cmd, requestId))); + return 
FutureUtil.waitForAll(completableFutures); + } + } + + private ByteBuf newMultiTransactionMessageAck(long consumerId, TxnID txnID, + List> entries, + long requestID) { + BaseCommand cmd = newMultiMessageAckCommon(entries); + cmd.getAck() + .setConsumerId(consumerId) + .setAckType(AckType.Individual) + .setTxnidLeastBits(txnID.getLeastSigBits()) + .setTxnidMostBits(txnID.getMostSigBits()) + .setRequestId(requestID); + return serializeWithSize(cmd); + } + + private static final FastThreadLocal LOCAL_BASE_COMMAND = new FastThreadLocal() { + @Override + protected BaseCommand initialValue() throws Exception { + return new BaseCommand(); + } + }; + + private static BaseCommand newMultiMessageAckCommon(List> entries) { + BaseCommand cmd = LOCAL_BASE_COMMAND.get() + .clear() + .setType(BaseCommand.Type.ACK); + CommandAck ack = cmd.setAck(); + int entriesCount = entries.size(); + for (int i = 0; i < entriesCount; i++) { + long ledgerId = entries.get(i).getLeft(); + long entryId = entries.get(i).getMiddle(); + ConcurrentBitSetRecyclable bitSet = entries.get(i).getRight(); + MessageIdData msgId = ack.addMessageId() + .setLedgerId(ledgerId) + .setEntryId(entryId); + if (bitSet != null) { + long[] ackSet = bitSet.toLongArray(); + for (int j = 0; j < ackSet.length; j++) { + msgId.addAckSet(ackSet[j]); + } + bitSet.recycle(); + } } + + return cmd; } private CompletableFuture doTransactionAcknowledgeForResponse(List messageIds, AckType ackType, diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java index 080d328e3f02c..9d30108ec7a1d 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ControlledClusterFailover.java @@ -236,6 +236,7 @@ public ControlledClusterFailoverBuilder urlProviderHeader(Map he @Override public 
ControlledClusterFailoverBuilder checkInterval(long interval, @NonNull TimeUnit timeUnit) { + checkArgument(interval >= 0, "The check interval time must not be negative."); this.interval = timeUnit.toMillis(interval); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java index 7969ce402363f..e33efabcc9e0e 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java @@ -19,7 +19,6 @@ package org.apache.pulsar.client.impl; import io.netty.channel.EventLoopGroup; -import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; import java.nio.ByteBuffer; @@ -178,18 +177,14 @@ public CompletableFuture> getSchema(TopicName topicName, by } httpClient.get(path, GetSchemaResponse.class).thenAccept(response -> { if (response.getType() == SchemaType.KEY_VALUE) { - try { - SchemaData data = SchemaData - .builder() - .data(SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema( - response.getData().getBytes(StandardCharsets.UTF_8))) - .type(response.getType()) - .props(response.getProperties()) - .build(); - future.complete(Optional.of(SchemaInfoUtil.newSchemaInfo(schemaName, data))); - } catch (IOException err) { - future.completeExceptionally(err); - } + SchemaData data = SchemaData + .builder() + .data(SchemaUtils.convertKeyValueDataStringToSchemaInfoSchema( + response.getData().getBytes(StandardCharsets.UTF_8))) + .type(response.getType()) + .props(response.getProperties()) + .build(); + future.complete(Optional.of(SchemaInfoUtil.newSchemaInfo(schemaName, data))); } else { future.complete(Optional.of(SchemaInfoUtil.newSchemaInfo(schemaName, response))); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java 
b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java index 935e3fad2b59d..c15821c054325 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MemoryLimitController.java @@ -22,7 +22,9 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; +import lombok.extern.slf4j.Slf4j; +@Slf4j public class MemoryLimitController { private final long memoryLimit; @@ -46,11 +48,19 @@ public MemoryLimitController(long memoryLimitBytes, long triggerThreshold, Runna } public void forceReserveMemory(long size) { + checkPositive(size); + if (size == 0) { + return; + } long newUsage = currentUsage.addAndGet(size); checkTrigger(newUsage - size, newUsage); } public boolean tryReserveMemory(long size) { + checkPositive(size); + if (size == 0) { + return true; + } while (true) { long current = currentUsage.get(); long newUsage = current + size; @@ -68,6 +78,15 @@ public boolean tryReserveMemory(long size) { } } + private static void checkPositive(long memorySize) { + if (memorySize < 0) { + String errorMsg = String.format("Try to reserve/release memory failed, the param memorySize" + + " is a negative value: %s", memorySize); + log.error(errorMsg); + throw new IllegalArgumentException(errorMsg); + } + } + private void checkTrigger(long prevUsage, long newUsage) { if (newUsage >= triggerThreshold && prevUsage < triggerThreshold && trigger != null) { if (triggerRunning.compareAndSet(false, true)) { @@ -81,6 +100,10 @@ private void checkTrigger(long prevUsage, long newUsage) { } public void reserveMemory(long size) throws InterruptedException { + checkPositive(size); + if (size == 0) { + return; + } if (!tryReserveMemory(size)) { mutex.lock(); try { @@ -94,6 +117,10 @@ public void reserveMemory(long size) throws InterruptedException { } public void releaseMemory(long 
size) { + checkPositive(size); + if (size == 0) { + return; + } long newUsage = currentUsage.addAndGet(-size); if (newUsage + size > memoryLimit && newUsage <= memoryLimit) { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java index c8b18524ec052..a0d1446ba3d55 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageIdAdvUtils.java @@ -64,6 +64,9 @@ static boolean isBatch(MessageIdAdv msgId) { } static MessageIdAdv discardBatch(MessageId messageId) { + if (messageId instanceof ChunkMessageIdImpl) { + return (MessageIdAdv) messageId; + } MessageIdAdv msgId = (MessageIdAdv) messageId; return new MessageIdImpl(msgId.getLedgerId(), msgId.getEntryId(), msgId.getPartitionIndex()); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java index 8a515a9f9b8d7..0272d3d3baf34 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java @@ -49,6 +49,7 @@ import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import javax.annotation.Nullable; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.BatchReceivePolicy; import org.apache.pulsar.client.api.Consumer; @@ -101,7 +102,8 @@ public class MultiTopicsConsumerImpl extends ConsumerBase { private final MultiTopicConsumerStatsRecorderImpl stats; private final ConsumerConfigurationData internalConfig; - private volatile MessageIdAdv startMessageId; + private final MessageIdAdv startMessageId; + private volatile boolean duringSeek = false; private final long 
startMessageRollbackDurationInSec; MultiTopicsConsumerImpl(PulsarClientImpl client, ConsumerConfigurationData conf, ExecutorProvider executorProvider, CompletableFuture> subscribeFuture, Schema schema, @@ -235,6 +237,10 @@ private void startReceivingMessages(List> newConsumers) { } private void receiveMessageFromConsumer(ConsumerImpl consumer, boolean batchReceive) { + if (duringSeek) { + log.info("[{}] Pause receiving messages for topic {} due to seek", subscription, consumer.getTopic()); + return; + } CompletableFuture>> messagesFuture; if (batchReceive) { messagesFuture = consumer.batchReceiveAsync().thenApply(msgs -> ((MessagesImpl) msgs).getMessageList()); @@ -252,8 +258,12 @@ private void receiveMessageFromConsumer(ConsumerImpl consumer, boolean batchR } // Process the message, add to the queue and trigger listener or async callback messages.forEach(msg -> { - if (isValidConsumerEpoch((MessageImpl) msg)) { + final boolean skipDueToSeek = duringSeek; + if (isValidConsumerEpoch((MessageImpl) msg) && !skipDueToSeek) { messageReceived(consumer, msg); + } else if (skipDueToSeek) { + log.info("[{}] [{}] Skip processing message {} received during seek", topic, subscription, + msg.getMessageId()); } }); @@ -748,17 +758,12 @@ public void seek(Function function) throws PulsarClientException @Override public CompletableFuture seekAsync(Function function) { - List> futures = new ArrayList<>(consumers.size()); - consumers.values().forEach(consumer -> futures.add(consumer.seekAsync(function))); - unAckedMessageTracker.clear(); - incomingMessages.clear(); - resetIncomingMessageSize(); - return FutureUtil.waitForAll(futures); + return seekAllAsync(consumer -> consumer.seekAsync(function)); } @Override public CompletableFuture seekAsync(MessageId messageId) { - final Consumer internalConsumer; + final ConsumerImpl internalConsumer; if (messageId instanceof TopicMessageId) { TopicMessageId topicMessageId = (TopicMessageId) messageId; internalConsumer = 
consumers.get(topicMessageId.getOwnerTopic()); @@ -775,25 +780,46 @@ public CompletableFuture seekAsync(MessageId messageId) { ); } - final CompletableFuture seekFuture; if (internalConsumer == null) { - List> futures = new ArrayList<>(consumers.size()); - consumers.values().forEach(consumerImpl -> futures.add(consumerImpl.seekAsync(messageId))); - seekFuture = FutureUtil.waitForAll(futures); + return seekAllAsync(consumer -> consumer.seekAsync(messageId)); } else { - seekFuture = internalConsumer.seekAsync(messageId); + return seekAsyncInternal(Collections.singleton(internalConsumer), __ -> __.seekAsync(messageId)); } + } + @Override + public CompletableFuture seekAsync(long timestamp) { + return seekAllAsync(consumer -> consumer.seekAsync(timestamp)); + } + + private CompletableFuture seekAsyncInternal(Collection> consumers, + Function, CompletableFuture> seekFunc) { + beforeSeek(); + final CompletableFuture future = new CompletableFuture<>(); + FutureUtil.waitForAll(consumers.stream().map(seekFunc).collect(Collectors.toList())) + .whenComplete((__, e) -> afterSeek(future, e)); + return future; + } + + private CompletableFuture seekAllAsync(Function, CompletableFuture> seekFunc) { + return seekAsyncInternal(consumers.values(), seekFunc); + } + + private void beforeSeek() { + duringSeek = true; unAckedMessageTracker.clear(); clearIncomingMessages(); - return seekFuture; } - @Override - public CompletableFuture seekAsync(long timestamp) { - List> futures = new ArrayList<>(consumers.size()); - consumers.values().forEach(consumer -> futures.add(consumer.seekAsync(timestamp))); - return FutureUtil.waitForAll(futures); + private void afterSeek(CompletableFuture seekFuture, @Nullable Throwable throwable) { + duringSeek = false; + log.info("[{}] Resume receiving messages for {} since seek is done", subscription, consumers.keySet()); + startReceivingMessages(new ArrayList<>(consumers.values())); + if (throwable == null) { + seekFuture.complete(null); + } else { + 
seekFuture.completeExceptionally(throwable); + } } @Override @@ -903,7 +929,10 @@ private void removeTopic(String topic) { } } - // subscribe one more given topic + /*** + * Subscribe one more given topic. + * @param topicName topic name without the partition suffix. + */ public CompletableFuture subscribeAsync(String topicName, boolean createTopicIfDoesNotExist) { TopicName topicNameInstance = getTopicName(topicName); if (topicNameInstance == null) { @@ -999,13 +1028,13 @@ CompletableFuture subscribeAsync(String topicName, int numberPartitions) { private void subscribeTopicPartitions(CompletableFuture subscribeResult, String topicName, int numPartitions, boolean createIfDoesNotExist) { - client.preProcessSchemaBeforeSubscribe(client, schema, topicName).whenComplete((schema, cause) -> { - if (null == cause) { - doSubscribeTopicPartitions(schema, subscribeResult, topicName, numPartitions, createIfDoesNotExist); - } else { - subscribeResult.completeExceptionally(cause); - } - }); + client.preProcessSchemaBeforeSubscribe(client, schema, topicName) + .thenAccept(schema -> { + doSubscribeTopicPartitions(schema, subscribeResult, topicName, numPartitions, createIfDoesNotExist); + }).exceptionally(cause -> { + subscribeResult.completeExceptionally(cause); + return null; + }); } private void doSubscribeTopicPartitions(Schema schema, @@ -1225,7 +1254,10 @@ public CompletableFuture unsubscribeAsync(String topicName) { return unsubscribeFuture; } - // Remove a consumer for a topic + /*** + * Remove a consumer for a topic. + * @param topicName topic name contains the partition suffix. 
+ */ public CompletableFuture removeConsumerAsync(String topicName) { checkArgument(TopicName.isValid(topicName), "Invalid topic name:" + topicName); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java index f780edc95c136..bf7f1066173f6 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java @@ -436,7 +436,7 @@ public CompletableFuture onTopicsExtended(Collection topicsExtende }); // call interceptor with the metadata change onPartitionsChange(topic, currentPartitionNumber); - return null; + return future; } } else { log.error("[{}] not support shrink topic partitions. old: {}, new: {}", diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java index c6ea6216cc1f4..4d179f7d914c2 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java @@ -31,6 +31,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.apache.pulsar.client.api.Consumer; @@ -50,11 +51,25 @@ public class PatternMultiTopicsConsumerImpl extends MultiTopicsConsumerImpl watcherFuture; + private final CompletableFuture watcherFuture = new CompletableFuture<>(); protected NamespaceName namespaceName; + + /** + * There is two task to re-check topic changes, the both tasks will not be take affects at the same time. + * 1. 
{@link #recheckTopicsChangeAfterReconnect}: it will be called after the {@link TopicListWatcher} reconnected + * if you enabled {@link TopicListWatcher}. This backoff used to do a retry if + * {@link #recheckTopicsChangeAfterReconnect} is failed. + * 2. {@link #run} A scheduled task to trigger re-check topic changes, it will be used if you disabled + * {@link TopicListWatcher}. + */ + private final Backoff recheckPatternTaskBackoff; + private final AtomicInteger recheckPatternEpoch = new AtomicInteger(); private volatile Timeout recheckPatternTimeout = null; private volatile String topicsHash; + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). + */ public PatternMultiTopicsConsumerImpl(Pattern topicsPattern, String topicsHash, PulsarClientImpl client, @@ -69,6 +84,11 @@ public PatternMultiTopicsConsumerImpl(Pattern topicsPattern, this.topicsPattern = topicsPattern; this.topicsHash = topicsHash; this.subscriptionMode = subscriptionMode; + this.recheckPatternTaskBackoff = new BackoffBuilder() + .setInitialTime(client.getConfiguration().getInitialBackoffIntervalNanos(), TimeUnit.NANOSECONDS) + .setMax(client.getConfiguration().getMaxBackoffIntervalNanos(), TimeUnit.NANOSECONDS) + .setMandatoryStop(0, TimeUnit.SECONDS) + .create(); if (this.namespaceName == null) { this.namespaceName = getNameSpaceFromPattern(topicsPattern); @@ -78,11 +98,10 @@ public PatternMultiTopicsConsumerImpl(Pattern topicsPattern, this.topicsChangeListener = new PatternTopicsChangedListener(); this.recheckPatternTimeout = client.timer() .newTimeout(this, Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); - this.watcherFuture = new CompletableFuture<>(); if (subscriptionMode == Mode.PERSISTENT) { long watcherId = client.newTopicListWatcherId(); new TopicListWatcher(topicsChangeListener, client, topicsPattern, watcherId, - namespaceName, topicsHash, watcherFuture); + namespaceName, topicsHash, watcherFuture, () -> 
recheckTopicsChangeAfterReconnect()); watcherFuture .thenAccept(__ -> recheckPatternTimeout.cancel()) .exceptionally(ex -> { @@ -99,40 +118,75 @@ public static NamespaceName getNameSpaceFromPattern(Pattern pattern) { return TopicName.get(pattern.pattern()).getNamespaceObject(); } + /** + * This method will be called after the {@link TopicListWatcher} reconnected after enabled {@link TopicListWatcher}. + */ + private void recheckTopicsChangeAfterReconnect() { + // Skip if closed or the task has been cancelled. + if (getState() == State.Closing || getState() == State.Closed) { + return; + } + // Do check. + recheckTopicsChange().whenComplete((ignore, ex) -> { + if (ex != null) { + log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); + long delayMs = recheckPatternTaskBackoff.next(); + client.timer().newTimeout(timeout -> { + recheckTopicsChangeAfterReconnect(); + }, delayMs, TimeUnit.MILLISECONDS); + } else { + recheckPatternTaskBackoff.reset(); + } + }); + } + // TimerTask to recheck topics change, and trigger subscribe/unsubscribe based on the change. 
@Override public void run(Timeout timeout) throws Exception { if (timeout.isCancelled()) { return; } - client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode, topicsPattern.pattern(), topicsHash) - .thenCompose(getTopicsResult -> { + recheckTopicsChange().exceptionally(ex -> { + log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); + return null; + }).thenAccept(__ -> { + // schedule the next re-check task + this.recheckPatternTimeout = client.timer() + .newTimeout(PatternMultiTopicsConsumerImpl.this, + Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); + }); + } - if (log.isDebugEnabled()) { - log.debug("Get topics under namespace {}, topics.size: {}, topicsHash: {}, filtered: {}", - namespaceName, getTopicsResult.getTopics().size(), getTopicsResult.getTopicsHash(), - getTopicsResult.isFiltered()); - getTopicsResult.getTopics().forEach(topicName -> - log.debug("Get topics under namespace {}, topic: {}", namespaceName, topicName)); - } + private CompletableFuture recheckTopicsChange() { + String pattern = topicsPattern.pattern(); + final int epoch = recheckPatternEpoch.incrementAndGet(); + return client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode, pattern, topicsHash) + .thenCompose(getTopicsResult -> { + // If "recheckTopicsChange" has been called more than one times, only make the last one take affects. + // Use "synchronized (recheckPatternTaskBackoff)" instead of + // `synchronized(PatternMultiTopicsConsumerImpl.this)` to avoid locking in a wider range. 
+ synchronized (recheckPatternTaskBackoff) { + if (recheckPatternEpoch.get() > epoch) { + return CompletableFuture.completedFuture(null); + } + if (log.isDebugEnabled()) { + log.debug("Get topics under namespace {}, topics.size: {}, topicsHash: {}, filtered: {}", + namespaceName, getTopicsResult.getTopics().size(), getTopicsResult.getTopicsHash(), + getTopicsResult.isFiltered()); + getTopicsResult.getTopics().forEach(topicName -> + log.debug("Get topics under namespace {}, topic: {}", namespaceName, topicName)); + } - final List oldTopics = new ArrayList<>(getPartitionedTopics()); - for (String partition : getPartitions()) { - TopicName topicName = TopicName.get(partition); - if (!topicName.isPartitioned() || !oldTopics.contains(topicName.getPartitionedTopicName())) { - oldTopics.add(partition); + final List oldTopics = new ArrayList<>(getPartitionedTopics()); + for (String partition : getPartitions()) { + TopicName topicName = TopicName.get(partition); + if (!topicName.isPartitioned() || !oldTopics.contains(topicName.getPartitionedTopicName())) { + oldTopics.add(partition); + } } + return updateSubscriptions(topicsPattern, this::setTopicsHash, getTopicsResult, + topicsChangeListener, oldTopics); } - return updateSubscriptions(topicsPattern, this::setTopicsHash, getTopicsResult, - topicsChangeListener, oldTopics); - }).exceptionally(ex -> { - log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); - return null; - }).thenAccept(__ -> { - // schedule the next re-check task - this.recheckPatternTimeout = client.timer() - .newTimeout(PatternMultiTopicsConsumerImpl.this, - Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); }); } @@ -169,14 +223,26 @@ void setTopicsHash(String topicsHash) { } interface TopicsChangedListener { - // unsubscribe and delete ConsumerImpl in the `consumers` map in `MultiTopicsConsumerImpl` based on added topics + /*** + * unsubscribe and delete {@link ConsumerImpl} in the {@link 
MultiTopicsConsumerImpl#consumers} map in + * {@link MultiTopicsConsumerImpl}. + * @param removedTopics topic names removed(contains the partition suffix). + */ CompletableFuture onTopicsRemoved(Collection removedTopics); - // subscribe and create a list of new ConsumerImpl, added them to the `consumers` map in - // `MultiTopicsConsumerImpl`. + + /*** + * subscribe and create a list of new {@link ConsumerImpl}, added them to the + * {@link MultiTopicsConsumerImpl#consumers} map in {@link MultiTopicsConsumerImpl}. + * @param addedTopics topic names added(contains the partition suffix). + */ CompletableFuture onTopicsAdded(Collection addedTopics); } private class PatternTopicsChangedListener implements TopicsChangedListener { + + /** + * {@inheritDoc} + */ @Override public CompletableFuture onTopicsRemoved(Collection removedTopics) { CompletableFuture removeFuture = new CompletableFuture<>(); @@ -198,6 +264,9 @@ public CompletableFuture onTopicsRemoved(Collection removedTopics) return removeFuture; } + /** + * {@inheritDoc} + */ @Override public CompletableFuture onTopicsAdded(Collection addedTopics) { CompletableFuture addFuture = new CompletableFuture<>(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java index 9086ccc4ef0e0..0cf776aea5942 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java @@ -124,7 +124,18 @@ public boolean isDuplicate(MessageId messageId) { // Already included in a cumulative ack return true; } else { - return pendingIndividualAcks.contains(MessageIdAdvUtils.discardBatch(messageIdAdv)); + // If "batchIndexAckEnabled" is false, the batched messages acknowledgment will be traced by + // 
pendingIndividualAcks. So no matter what type the message ID is, check with "pendingIndividualAcks" + // first. + MessageIdAdv key = MessageIdAdvUtils.discardBatch(messageIdAdv); + if (pendingIndividualAcks.contains(key)) { + return true; + } + if (messageIdAdv.getBatchIndex() >= 0) { + ConcurrentBitSetRecyclable bitSet = pendingIndividualBatchIndexAcks.get(key); + return bitSet != null && !bitSet.get(messageIdAdv.getBatchIndex()); + } + return false; } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java index 267b06649d719..8d2d917d7b880 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java @@ -695,6 +695,12 @@ private void serializeAndSendMessage(MessageImpl msg, op = OpSendMsg.create(msg, null, sequenceId, callback); final MessageMetadata finalMsgMetadata = msgMetadata; op.rePopulate = () -> { + if (msgMetadata.hasChunkId()) { + // The message metadata is shared between all chunks in a large message + // We need to reset the chunk id for each call of this method + // It's safe to do that because there is only 1 thread to manipulate this message metadata + finalMsgMetadata.setChunkId(chunkId); + } op.cmd = sendMessage(producerId, sequenceId, numMessages, messageId, finalMsgMetadata, encryptedPayload); }; @@ -1973,6 +1979,11 @@ String getHandlerName() { return producerName; } + @VisibleForTesting + void triggerSendTimer() throws Exception { + run(sendTimeout); + } + /** * Process sendTimeout events. 
*/ @@ -1991,7 +2002,8 @@ public void run(Timeout timeout) throws Exception { } OpSendMsg firstMsg = pendingMessages.peek(); - if (firstMsg == null && (batchMessageContainer == null || batchMessageContainer.isEmpty())) { + if (firstMsg == null && (batchMessageContainer == null || batchMessageContainer.isEmpty() + || batchMessageContainer.getFirstAddedTimestamp() == 0L)) { // If there are no pending messages, reset the timeout to the configured value. timeToWaitMs = conf.getSendTimeoutMs(); } else { @@ -2001,7 +2013,7 @@ public void run(Timeout timeout) throws Exception { } else { // Because we don't flush batch messages while disconnected, we consider them "createdAt" when // they would have otherwise been flushed. - createdAt = lastBatchSendNanoTime + createdAt = batchMessageContainer.getFirstAddedTimestamp() + TimeUnit.MICROSECONDS.toNanos(conf.getBatchingMaxPublishDelayMicros()); } // If there is at least one message, calculate the diff between the message timeout and the elapsed diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java index 6c749a8cf4354..fdabb5fa8cfa5 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java @@ -944,10 +944,20 @@ public void updateTlsTrustStorePathAndPassword(String tlsTrustStorePath, String conf.setTlsTrustStorePassword(tlsTrustStorePassword); } + public CompletableFuture getConnection(final String topic, int randomKeyForSelectConnection) { + TopicName topicName = TopicName.get(topic); + return lookup.getBroker(topicName) + .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight(), randomKeyForSelectConnection)); + } + + /** + * Only for test. 
+ */ + @VisibleForTesting public CompletableFuture getConnection(final String topic) { TopicName topicName = TopicName.get(topic); return lookup.getBroker(topicName) - .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight())); + .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight(), cnxPool.genRandomKeyToSelectCon())); } public CompletableFuture getConnectionToServiceUrl() { @@ -956,12 +966,13 @@ public CompletableFuture getConnectionToServiceUrl() { "Can't get client connection to HTTP service URL", null)); } InetSocketAddress address = lookup.resolveHost(); - return getConnection(address, address); + return getConnection(address, address, cnxPool.genRandomKeyToSelectCon()); } public CompletableFuture getConnection(final InetSocketAddress logicalAddress, - final InetSocketAddress physicalAddress) { - return cnxPool.getConnection(logicalAddress, physicalAddress); + final InetSocketAddress physicalAddress, + final int randomKeyForSelectConnection) { + return cnxPool.getConnection(logicalAddress, physicalAddress, randomKeyForSelectConnection); } /** visible for pulsar-functions. 
**/ diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java index 2860cda0ceef1..fd01cef9a216f 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderBuilderImpl.java @@ -274,6 +274,7 @@ public ReaderBuilder autoAckOldestChunkedMessageOnQueueFull(boolean autoAckOl @Override public ReaderBuilder expireTimeOfIncompleteChunkedMessage(long duration, TimeUnit unit) { + checkArgument(duration >= 0, "The expired time must not be negative."); conf.setExpireTimeOfIncompleteChunkedMessageMillis(unit.toMillis(duration)); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java index 560636f94622b..d46f7fa140887 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java @@ -284,8 +284,14 @@ private void readTailMessages(Reader reader) { log.error("Reader {} was closed while reading tail messages.", reader.getTopic(), ex); } else { + // Retrying on the other exceptions such as NotConnectedException + try { + Thread.sleep(50); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } log.warn("Reader {} was interrupted while reading tail messages. 
" - + "Retrying..", reader.getTopic(), ex); + + "Retrying..", reader.getTopic(), ex); readTailMessages(reader); } return null; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java index 2ce784dbaac04..86adf69f06e0f 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicListWatcher.java @@ -56,11 +56,17 @@ public class TopicListWatcher extends HandlerState implements ConnectionHandler. private final List previousExceptions = new CopyOnWriteArrayList<>(); private final AtomicReference clientCnxUsedForWatcherRegistration = new AtomicReference<>(); + private final Runnable recheckTopicsChangeAfterReconnect; + + /*** + * @param topicsPattern The regexp for the topic name(not contains partition suffix). + */ public TopicListWatcher(PatternMultiTopicsConsumerImpl.TopicsChangedListener topicsChangeListener, PulsarClientImpl client, Pattern topicsPattern, long watcherId, NamespaceName namespace, String topicsHash, - CompletableFuture watcherFuture) { + CompletableFuture watcherFuture, + Runnable recheckTopicsChangeAfterReconnect) { super(client, topicsPattern.pattern()); this.topicsChangeListener = topicsChangeListener; this.name = "Watcher(" + topicsPattern + ")"; @@ -77,6 +83,7 @@ public TopicListWatcher(PatternMultiTopicsConsumerImpl.TopicsChangedListener top this.namespace = namespace; this.topicsHash = topicsHash; this.watcherFuture = watcherFuture; + this.recheckTopicsChangeAfterReconnect = recheckTopicsChangeAfterReconnect; connectionHandler.grabCnx(); } @@ -138,9 +145,9 @@ public CompletableFuture connectionOpened(ClientCnx cnx) { return; } } - this.connectionHandler.resetBackoff(); + recheckTopicsChangeAfterReconnect.run(); watcherFuture.complete(this); future.complete(null); }).exceptionally((e) -> { diff --git 
a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java index 1b6cba2f7234d..1fec08a43f137 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicMessageImpl.java @@ -47,7 +47,7 @@ public class TopicMessageImpl implements Message { } /** - * Get the topic name without partition part of this message. + * Get the topic name with partition part of this message. * @return the name of the topic on which this message was published */ @Override diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java index ebbfca0c3cb3f..4ea6472b9d8b2 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.impl; +import static com.google.common.base.Preconditions.checkArgument; import com.google.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; import io.netty.util.Recycler; @@ -167,6 +168,8 @@ public CompletableFuture connectionOpened(ClientCnx cnx) { return null; }); } else { + LOG.warn("Can not connect to the transaction coordinator because the protocol version {} is " + + "lower than 19", cnx.getRemoteEndpointProtocolVersion()); registerToConnection(cnx); future.complete(null); } @@ -203,6 +206,7 @@ private void failPendingRequest() { } public CompletableFuture newTransactionAsync(long timeout, TimeUnit unit) { + checkArgument(timeout >= 0, "The timeout must not be negative."); if (LOG.isDebugEnabled()) { LOG.debug("New transaction with timeout in ms {}", unit.toMillis(timeout)); } diff --git 
a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java index 026f8a1e69e0b..895949fdf32cc 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TypedMessageBuilderImpl.java @@ -212,6 +212,7 @@ public TypedMessageBuilder disableReplication() { @Override public TypedMessageBuilder deliverAfter(long delay, TimeUnit unit) { + checkArgument(delay >= 0, "The delay time must not be negative."); return deliverAt(System.currentTimeMillis() + unit.toMillis(delay)); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java index 69f86a1a89f2c..20ec9c3d99af4 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java @@ -138,8 +138,11 @@ public void run(Timeout t) throws Exception { if (!headPartition.isEmpty()) { log.info("[{}] {} messages will be re-delivered", consumerBase, headPartition.size()); headPartition.forEach(messageId -> { - addChunkedMessageIdsAndRemoveFromSequenceMap(messageId, messageIds, consumerBase); - messageIds.add(messageId); + if (messageId instanceof ChunkMessageIdImpl) { + addChunkedMessageIdsAndRemoveFromSequenceMap(messageId, messageIds, consumerBase); + } else { + messageIds.add(messageId); + } messageIdPartitionMap.remove(messageId); }); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java index 8760926792cd7..3ae0e977d13c4 100644 --- 
a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ConsumerConfigurationData.java @@ -65,7 +65,7 @@ public class ConsumerConfigurationData implements Serializable, Cloneable { @ApiModelProperty( name = "topicsPattern", - value = "Topic pattern" + value = "The regexp for the topic name(not contains partition suffix)." ) private Pattern topicsPattern; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ReaderConfigurationData.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ReaderConfigurationData.java index a038383564da2..73d97f1f33607 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ReaderConfigurationData.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ReaderConfigurationData.java @@ -27,8 +27,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import lombok.Data; -import lombok.Getter; -import lombok.Setter; import org.apache.pulsar.client.api.ConsumerCryptoFailureAction; import org.apache.pulsar.client.api.CryptoKeyReader; import org.apache.pulsar.client.api.MessageCrypto; @@ -118,8 +116,6 @@ public class ReaderConfigurationData implements Serializable, Cloneable { private ConsumerCryptoFailureAction cryptoFailureAction = ConsumerCryptoFailureAction.FAIL; @JsonIgnore - @Setter(onMethod_ = @SuppressFBWarnings({"EI_EXPOSE_REP2"})) - @Getter(onMethod_ = @SuppressFBWarnings({"EI_EXPOSE_REP"})) private transient MessageCrypto messageCrypto = null; @ApiModelProperty( @@ -193,4 +189,14 @@ public ReaderConfigurationData clone() { throw new RuntimeException("Failed to clone ReaderConfigurationData"); } } + + @SuppressFBWarnings({"EI_EXPOSE_REP"}) + public MessageCrypto getMessageCrypto() { + return messageCrypto; + } + + @SuppressFBWarnings({"EI_EXPOSE_REP2"}) + public void setMessageCrypto(MessageCrypto messageCrypto) { + this.messageCrypto = 
messageCrypto; + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java index c5e9d4781c56f..255dedcbb85e9 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.impl.transaction; +import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; @@ -45,6 +46,7 @@ public TransactionBuilderImpl(PulsarClientImpl client, TransactionCoordinatorCli @Override public TransactionBuilder withTransactionTimeout(long txnTimeout, TimeUnit timeoutUnit) { + checkArgument(txnTimeout >= 0, "The txn timeout must not be negative."); this.txnTimeout = txnTimeout; this.timeUnit = timeoutUnit; return this; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java index dc057ffe32daf..cf0620edf98df 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ObjectCache.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.util; +import static com.google.common.base.Preconditions.checkArgument; import java.time.Clock; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; @@ -33,6 +34,7 @@ public class ObjectCache implements Supplier { public ObjectCache(Supplier supplier, long cacheDuration, TimeUnit unit) { this(supplier, cacheDuration, unit, Clock.systemUTC()); + checkArgument(cacheDuration >= 0, "The cache duration must not be negative."); } ObjectCache(Supplier supplier, long 
cacheDuration, TimeUnit unit, Clock clock) { diff --git a/pulsar-client/src/main/resources/findbugsExclude.xml b/pulsar-client/src/main/resources/findbugsExclude.xml index 67c012ad30b8f..1412601fec937 100644 --- a/pulsar-client/src/main/resources/findbugsExclude.xml +++ b/pulsar-client/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java index 0418a54c772cc..1d1a6f85bfd41 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java @@ -65,6 +65,8 @@ public void setup() throws NoSuchFieldException, IllegalAccessException { ConcurrentOpenHashMap.newBuilder().build(); cnx = spy(new ClientCnxTest(new ClientConfigurationData(), eventLoopGroup)); PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); doReturn(client).when(consumer).getClient(); doReturn(cnx).when(consumer).getClientCnx(); doReturn(new ConsumerStatsRecorderImpl()).when(consumer).getStats(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java index 36ffa30296bb0..63fbb239439bd 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AutoClusterFailoverTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; @@ -121,6 +122,8 @@ 
public void testInitialize() { AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); @@ -163,6 +166,8 @@ public void testAutoClusterFailoverSwitchWithoutAuthentication() { AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); @@ -217,6 +222,8 @@ public void testAutoClusterFailoverSwitchWithAuthentication() throws IOException AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); @@ -270,6 +277,8 @@ public void testAutoClusterFailoverSwitchTlsTrustStore() throws IOException { AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = 
mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary); Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary); Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java index 4b80e19c256d7..abb195c9830d0 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java @@ -105,6 +105,8 @@ public void recoveryAfterOom() { final ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData(); producerConfigurationData.setCompressionType(CompressionType.NONE); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); MemoryLimitController memoryLimitController = mock(MemoryLimitController.class); when(pulsarClient.getMemoryLimitController()).thenReturn(memoryLimitController); try { @@ -148,6 +150,8 @@ public void testMessagesSize() throws Exception { final ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData(); producerConfigurationData.setCompressionType(CompressionType.NONE); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); MemoryLimitController memoryLimitController = mock(MemoryLimitController.class); when(pulsarClient.getMemoryLimitController()).thenReturn(memoryLimitController); try { diff --git 
a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java index 4db5dbe877685..ff7d7f12dd452 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientTestFixtures.java @@ -19,7 +19,9 @@ package org.apache.pulsar.client.impl; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import io.netty.channel.ChannelHandlerContext; @@ -72,11 +74,16 @@ static PulsarClientImpl mockClientCnx(PulsarClientImpl clientMock) { .thenReturn(CompletableFuture.completedFuture(mock(ProducerResponse.class))); when(clientCnxMock.channel().remoteAddress()).thenReturn(mock(SocketAddress.class)); when(clientMock.getConnection(any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); - when(clientMock.getConnection(any(), any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(clientMock.getConnection(anyString())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(clientMock.getConnection(anyString(), anyInt())) + .thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(clientMock.getConnection(any(), any(), anyInt())) + .thenReturn(CompletableFuture.completedFuture(clientCnxMock)); ConnectionPool connectionPoolMock = mock(ConnectionPool.class); when(clientMock.getCnxPool()).thenReturn(connectionPoolMock); when(connectionPoolMock.getConnection(any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); - when(connectionPoolMock.getConnection(any(), any())).thenReturn(CompletableFuture.completedFuture(clientCnxMock)); + when(connectionPoolMock.getConnection(any(), any(), 
anyInt())) + .thenReturn(CompletableFuture.completedFuture(clientCnxMock)); return clientMock; } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java index 8dbd23f9c29c9..3fe136630462f 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java @@ -76,6 +76,8 @@ public class ConsumerBuilderImplTest { @BeforeMethod(alwaysRun = true) public void setup() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); ConsumerConfigurationData consumerConfigurationData = mock(ConsumerConfigurationData.class); when(consumerConfigurationData.getTopicsPattern()).thenReturn(Pattern.compile("\\w+")); when(consumerConfigurationData.getSubscriptionName()).thenReturn("testSubscriptionName"); @@ -104,6 +106,8 @@ public void testConsumerBuilderImpl() throws PulsarClientException { @Test(expectedExceptions = IllegalArgumentException.class) public void testConsumerBuilderImplWhenSchemaIsNull() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); ConsumerConfigurationData consumerConfigurationData = mock(ConsumerConfigurationData.class); new ConsumerBuilderImpl(client, consumerConfigurationData, null); } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java index 570b139832806..227e0db10b724 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java +++ 
b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ControlledClusterFailoverTest.java @@ -31,6 +31,7 @@ import org.testng.annotations.Test; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; @Test(groups = "broker-impl") public class ControlledClusterFailoverTest { @@ -88,6 +89,8 @@ public void testControlledClusterFailoverSwitch() throws IOException { ControlledClusterFailover controlledClusterFailover = Mockito.spy((ControlledClusterFailover) provider); PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(pulsarClient.getCnxPool()).thenReturn(connectionPool); controlledClusterFailover.initialize(pulsarClient); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java index 78ffa247f7b6e..1aaf3f77da490 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MemoryLimitControllerTest.java @@ -21,6 +21,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -197,4 +198,39 @@ public void testStepRelease() throws Exception { assertTrue(l3.await(1, TimeUnit.SECONDS)); assertEquals(mlc.currentUsage(), 101); } + + @Test + public void testModifyMemoryFailedDueToNegativeParam() throws Exception { + MemoryLimitController mlc = new MemoryLimitController(100); + + try { + mlc.tryReserveMemory(-1); + fail("The test should fail due to calling tryReserveMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. 
+ } + + try { + mlc.reserveMemory(-1); + fail("The test should fail due to calling reserveMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. + } + + try { + mlc.forceReserveMemory(-1); + fail("The test should fail due to calling forceReserveMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. + } + + try { + mlc.releaseMemory(-1); + fail("The test should fail due to calling releaseMemory with a negative value."); + } catch (IllegalArgumentException e) { + // Expected ex. + } + + assertEquals(mlc.currentUsage(), 0); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java index 223881d85a87b..2bd18f69386f1 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java @@ -70,6 +70,8 @@ public class PartitionedProducerImplTest { @BeforeMethod(alwaysRun = true) public void setup() { client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); schema = mock(Schema.class); producerInterceptors = mock(ProducerInterceptors.class); producerCreatedFuture = new CompletableFuture<>(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java index bb3e3fc3accf6..b830d375303bb 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerBuilderImplTest.java @@ -52,6 +52,8 @@ public class ProducerBuilderImplTest { public void setup() { Producer producer = mock(Producer.class); client = 
mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); producerBuilderImpl = new ProducerBuilderImpl<>(client, Schema.BYTES); when(client.newProducer()).thenReturn(producerBuilderImpl); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java index 32d0eff6e792e..27e2dcb37cee0 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java @@ -40,6 +40,8 @@ public void testIncrementNumAcksReceived() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); conf.setStatsIntervalSeconds(1); PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.getConfiguration()).thenReturn(conf); Timer timer = new HashedWheelTimer(); when(client.timer()).thenReturn(timer); @@ -60,6 +62,8 @@ public void testGetStatsAndCancelStatsTimeoutWithoutArriveUpdateInterval() { ClientConfigurationData conf = new ClientConfigurationData(); conf.setStatsIntervalSeconds(60); PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.getConfiguration()).thenReturn(conf); Timer timer = new HashedWheelTimer(); when(client.timer()).thenReturn(timer); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java index 54d13538d7867..e0b25db891247 100644 --- 
a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.mock; @@ -122,7 +123,7 @@ public void testConsumerIsClosed() throws Exception { when(cnx.ctx()).thenReturn(ctx); when(cnx.sendRequestWithId(any(ByteBuf.class), anyLong())) .thenReturn(CompletableFuture.completedFuture(mock(ProducerResponse.class))); - when(pool.getConnection(any(InetSocketAddress.class), any(InetSocketAddress.class))) + when(pool.getConnection(any(InetSocketAddress.class), any(InetSocketAddress.class), anyInt())) .thenReturn(CompletableFuture.completedFuture(cnx)); ClientConfigurationData conf = new ClientConfigurationData(); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java index 9959a2038555c..eee8ba4e8f41a 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewBuilderImplTest.java @@ -52,6 +52,8 @@ public void setup() { Reader reader = mock(Reader.class); when(reader.readNextAsync()).thenReturn(CompletableFuture.allOf()); client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.newReader(any(Schema.class))) .thenReturn(new ReaderBuilderImpl(client, Schema.BYTES)); when(client.createReaderAsync(any(ReaderConfigurationData.class), any(Schema.class))) diff --git 
a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java index 68c886bc7211a..6a866034ddbf8 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TableViewImplTest.java @@ -38,6 +38,8 @@ public class TableViewImplTest { @BeforeClass(alwaysRun = true) public void setup() { client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.newReader(any(Schema.class))) .thenReturn(new ReaderBuilderImpl(client, Schema.BYTES)); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java index e462bb4d62cb5..4fa4284109d2e 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/TopicListWatcherTest.java @@ -28,7 +28,9 @@ import org.apache.pulsar.common.api.proto.CommandWatchTopicUpdate; import org.apache.pulsar.common.naming.NamespaceName; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -52,6 +54,9 @@ public class TopicListWatcherTest { public void setup() { listener = mock(TopicsChangedListener.class); client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); + when(connectionPool.genRandomKeyToSelectCon()).thenReturn(0); 
when(client.getConfiguration()).thenReturn(new ClientConfigurationData()); clientCnxFuture = new CompletableFuture<>(); when(client.getConnectionToServiceUrl()).thenReturn(clientCnxFuture); @@ -59,15 +64,18 @@ public void setup() { when(client.timer()).thenReturn(timer); String topic = "persistent://tenant/ns/topic\\d+"; when(client.getConnection(topic)).thenReturn(clientCnxFuture); + when(client.getConnection(topic, 0)).thenReturn(clientCnxFuture); + when(client.getConnection(any(), any(), anyInt())).thenReturn(clientCnxFuture); + when(connectionPool.getConnection(any(), any(), anyInt())).thenReturn(clientCnxFuture); watcherFuture = new CompletableFuture<>(); watcher = new TopicListWatcher(listener, client, Pattern.compile(topic), 7, - NamespaceName.get("tenant/ns"), null, watcherFuture); + NamespaceName.get("tenant/ns"), null, watcherFuture, () -> {}); } @Test public void testWatcherGrabsConnection() { - verify(client).getConnection(any()); + verify(client).getConnection(anyString(), anyInt()); } @Test diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java index 4ccc514e8e7f1..91ad321048226 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/UnAckedMessageTrackerTest.java @@ -29,12 +29,15 @@ import io.netty.util.HashedWheelTimer; import io.netty.util.Timer; import io.netty.util.concurrent.DefaultThreadFactory; - +import java.time.Duration; import java.util.HashSet; import java.util.concurrent.TimeUnit; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.MessageIdAdv; import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.awaitility.Awaitility; import org.testng.annotations.Test; public 
class UnAckedMessageTrackerTest { @@ -42,6 +45,8 @@ public class UnAckedMessageTrackerTest { @Test public void testAddAndRemove() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); Timer timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-timer", Thread.currentThread().isDaemon()), 1, TimeUnit.MILLISECONDS); when(client.timer()).thenReturn(timer); @@ -77,4 +82,50 @@ public void testAddAndRemove() { timer.stop(); } + @Test + public void testTrackChunkedMessageId() { + PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); + Timer timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-timer", Thread.currentThread().isDaemon()), + 1, TimeUnit.MILLISECONDS); + when(client.timer()).thenReturn(timer); + + ConsumerBase consumer = mock(ConsumerBase.class); + doNothing().when(consumer).onAckTimeoutSend(any()); + doNothing().when(consumer).redeliverUnacknowledgedMessages(any()); + ConsumerConfigurationData conf = new ConsumerConfigurationData<>(); + conf.setAckTimeoutMillis(1000); + conf.setTickDurationMillis(1000); + UnAckedMessageTracker tracker = new UnAckedMessageTracker(client, consumer, conf); + + assertTrue(tracker.isEmpty()); + assertEquals(tracker.size(), 0); + + // Build chunked message ID + MessageIdImpl[] chunkMsgIds = new MessageIdImpl[5]; + for (int i = 0; i < 5; i++) { + chunkMsgIds[i] = new MessageIdImpl(1L, i, -1); + } + ChunkMessageIdImpl chunkedMessageId = + new ChunkMessageIdImpl(chunkMsgIds[0], chunkMsgIds[chunkMsgIds.length - 1]); + + consumer.unAckedChunkedMessageIdSequenceMap = + ConcurrentOpenHashMap.newBuilder().build(); + consumer.unAckedChunkedMessageIdSequenceMap.put(chunkedMessageId, chunkMsgIds); + + // Redeliver chunked message + tracker.add(chunkedMessageId); + + Awaitility.await() + 
.pollInterval(Duration.ofMillis(200)) + .atMost(Duration.ofSeconds(3)) + .untilAsserted(() -> assertEquals(tracker.size(), 0)); + + // Assert that all chunk message ID are removed from unAckedChunkedMessageIdSequenceMap + assertEquals(consumer.unAckedChunkedMessageIdSequenceMap.size(), 0); + + timer.stop(); + } + } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java index 3fcd6f12b982d..85012276d5af1 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/ProtobufSchemaTest.java @@ -41,20 +41,20 @@ public class ProtobufSchemaTest { "\"namespace\":\"org.apache.pulsar.client.schema.proto.Test\"," + "\"fields\":[{\"name\":\"stringField\",\"type\":{\"type\":\"string\"," + "\"avro.java.string\":\"String\"},\"default\":\"\"},{\"name\":\"doubleField\"," + - "\"type\":\"double\",\"default\":0},{\"name\":\"intField\",\"type\":\"int\"," + + "\"type\":\"double\",\"default\":0.0},{\"name\":\"intField\",\"type\":\"int\"," + "\"default\":0},{\"name\":\"testEnum\",\"type\":{\"type\":\"enum\"," + "\"name\":\"TestEnum\",\"symbols\":[\"SHARED\",\"FAILOVER\"]}," + "\"default\":\"SHARED\"},{\"name\":\"nestedField\"," + "\"type\":[\"null\",{\"type\":\"record\",\"name\":\"SubMessage\"," + "\"fields\":[{\"name\":\"foo\",\"type\":{\"type\":\"string\"," + "\"avro.java.string\":\"String\"},\"default\":\"\"}" + - ",{\"name\":\"bar\",\"type\":\"double\",\"default\":0}]}]" + + ",{\"name\":\"bar\",\"type\":\"double\",\"default\":0.0}]}]" + ",\"default\":null},{\"name\":\"repeatedField\",\"type\":{\"type\":\"array\"" + ",\"items\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},\"default\":[]}" + ",{\"name\":\"externalMessage\",\"type\":[\"null\",{\"type\":\"record\"" + 
",\"name\":\"ExternalMessage\",\"namespace\":\"org.apache.pulsar.client.schema.proto.ExternalTest\"" + ",\"fields\":[{\"name\":\"stringField\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}," + - "\"default\":\"\"},{\"name\":\"doubleField\",\"type\":\"double\",\"default\":0}]}],\"default\":null}]}"; + "\"default\":\"\"},{\"name\":\"doubleField\",\"type\":\"double\",\"default\":0.0}]}],\"default\":null}]}"; private static final String EXPECTED_PARSING_INFO = "{\"__alwaysAllowNull\":\"true\",\"__jsr310ConversionEnabled\":\"false\"," + "\"__PARSING_INFO__\":\"[{\\\"number\\\":1,\\\"name\\\":\\\"stringField\\\",\\\"type\\\":\\\"STRING\\\"," + diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java index 8959e67023463..bfd6af37e3ea6 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/generic/MultiVersionSchemaInfoProviderTest.java @@ -27,6 +27,7 @@ import java.util.concurrent.CompletableFuture; import org.apache.pulsar.client.api.schema.SchemaDefinition; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.LookupService; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.schema.AvroSchema; @@ -46,6 +47,8 @@ public class MultiVersionSchemaInfoProviderTest { @BeforeMethod public void setup() { PulsarClientImpl client = mock(PulsarClientImpl.class); + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(client.getCnxPool()).thenReturn(connectionPool); when(client.getLookup()).thenReturn(mock(LookupService.class)); schemaProvider = new MultiVersionSchemaInfoProvider( TopicName.get("persistent://public/default/my-topic"), client); diff --git 
a/pulsar-common/pom.xml b/pulsar-common/pom.xml index 8c99638878f53..a9f2492f097f9 100644 --- a/pulsar-common/pom.xml +++ b/pulsar-common/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-common Pulsar Common Common libraries needed by client/broker/tools - ${project.groupId} pulsar-client-api ${project.version} - ${project.groupId} pulsar-client-admin-api ${project.version} - io.swagger swagger-annotations provided - org.slf4j slf4j-api - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.module jackson-module-parameter-names - com.fasterxml.jackson.datatype jackson-datatype-jsr310 - com.fasterxml.jackson.datatype jackson-datatype-jdk8 - com.google.guava guava - io.netty netty-handler - io.netty netty-resolver-dns - io.netty netty-transport-native-epoll linux-x86_64 - io.netty netty-transport-native-unix-common linux-x86_64 - org.apache.bookkeeper bookkeeper-common-allocator @@ -115,17 +98,14 @@ - org.apache.bookkeeper cpu-affinity - io.airlift aircompressor - org.apache.bookkeeper circe-checksum @@ -141,66 +121,54 @@ - io.netty netty-tcnative-boringssl-static - io.netty.incubator netty-incubator-transport-classes-io_uring - io.netty.incubator netty-incubator-transport-native-io_uring linux-x86_64 - io.netty.incubator netty-incubator-transport-native-io_uring linux-aarch_64 - io.netty netty-codec-haproxy - org.apache.commons commons-lang3 - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - javax.ws.rs javax.ws.rs-api - commons-io commons-io - com.github.spotbugs spotbugs-annotations provided true - com.google.protobuf - protobuf-java + protobuf-java - org.bouncycastle @@ -208,38 +176,32 @@ ${bouncycastle.bc-fips.version} test - org.lz4 lz4-java 1.5.0 test - com.github.luben zstd-jni test - org.xerial.snappy snappy-java test - com.google.code.gson gson - org.awaitility awaitility test - @@ -249,7 +211,6 @@ ${pulsar.client.compiler.release} - org.gaul modernizer-maven-plugin @@ 
-267,7 +228,6 @@ - com.github.splunk.lightproto lightproto-maven-plugin @@ -280,7 +240,6 @@ - pl.project13.maven git-commit-id-plugin @@ -305,7 +264,6 @@ - org.codehaus.mojo templating-maven-plugin diff --git a/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java b/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java index 8acbf26559b7b..881ad424669d2 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaUtils.java @@ -359,8 +359,7 @@ private static byte[] getKeyOrValueSchemaBytes(JsonElement jsonElement) { * @param keyValueSchemaInfoDataJsonBytes the key/value schema info data json bytes * @return the key/value schema info data bytes */ - public static byte[] convertKeyValueDataStringToSchemaInfoSchema( - byte[] keyValueSchemaInfoDataJsonBytes) throws IOException { + public static byte[] convertKeyValueDataStringToSchemaInfoSchema(byte[] keyValueSchemaInfoDataJsonBytes) { JsonObject jsonObject = (JsonObject) toJsonElement(new String(keyValueSchemaInfoDataJsonBytes, UTF_8)); byte[] keyBytes = getKeyOrValueSchemaBytes(jsonObject.get("key")); byte[] valueBytes = getKeyOrValueSchemaBytes(jsonObject.get("value")); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java index 620e1156d3555..9736d8b47ef71 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarClassLoader.java @@ -154,6 +154,11 @@ public NarClassLoader run() { }); } + public static List getClasspathFromArchive(File narPath, String narExtractionDirectory) throws IOException { + File unpacked = NarUnpacker.unpackNar(narPath, getNarExtractionDirectory(narExtractionDirectory)); + return getClassPathEntries(unpacked); + } + 
private static File getNarExtractionDirectory(String configuredDirectory) { return new File(configuredDirectory + "/" + TMP_DIR_PREFIX); } @@ -164,16 +169,11 @@ private static File getNarExtractionDirectory(String configuredDirectory) { * @param narWorkingDirectory * directory to explode nar contents to * @param parent - * @throws IllegalArgumentException - * if the NAR is missing the Java Services API file for FlowFileProcessor implementations. - * @throws ClassNotFoundException - * if any of the FlowFileProcessor implementations defined by the Java Services API cannot be - * loaded. * @throws IOException * if an error occurs while loading the NAR. */ private NarClassLoader(final File narWorkingDirectory, Set additionalJars, ClassLoader parent) - throws ClassNotFoundException, IOException { + throws IOException { super(new URL[0], parent); this.narWorkingDirectory = narWorkingDirectory; @@ -238,22 +238,31 @@ public List getServiceImplementation(String serviceName) throws IOExcept * if the URL list could not be updated. */ private void updateClasspath(File root) throws IOException { - addURL(root.toURI().toURL()); // for compiled classes, META-INF/, etc. 
+ getClassPathEntries(root).forEach(f -> { + try { + addURL(f.toURI().toURL()); + } catch (IOException e) { + log.error("Failed to add entry to classpath: {}", f, e); + } + }); + } + static List getClassPathEntries(File root) { + List classPathEntries = new ArrayList<>(); + classPathEntries.add(root); File dependencies = new File(root, "META-INF/bundled-dependencies"); if (!dependencies.isDirectory()) { - log.warn("{} does not contain META-INF/bundled-dependencies!", narWorkingDirectory); + log.warn("{} does not contain META-INF/bundled-dependencies!", root); } - addURL(dependencies.toURI().toURL()); + classPathEntries.add(dependencies); if (dependencies.isDirectory()) { final File[] jarFiles = dependencies.listFiles(JAR_FILTER); if (jarFiles != null) { Arrays.sort(jarFiles, Comparator.comparing(File::getName)); - for (File libJar : jarFiles) { - addURL(libJar.toURI().toURL()); - } + classPathEntries.addAll(Arrays.asList(jarFiles)); } } + return classPathEntries; } @Override diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java index 9bd5bc48df819..1e34c3e4fe706 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/NarUnpacker.java @@ -32,13 +32,14 @@ import java.io.RandomAccessFile; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; +import java.nio.file.Path; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Base64; import java.util.Enumeration; import java.util.concurrent.ConcurrentHashMap; -import java.util.jar.JarEntry; -import java.util.jar.JarFile; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; import lombok.extern.slf4j.Slf4j; /** @@ -113,18 +114,24 @@ static File doUnpackNar(final File nar, final File baseWorkingDirectory, Runnabl * if the NAR could not be unpacked. 
*/ private static void unpack(final File nar, final File workingDirectory) throws IOException { - try (JarFile jarFile = new JarFile(nar)) { - Enumeration jarEntries = jarFile.entries(); - while (jarEntries.hasMoreElements()) { - JarEntry jarEntry = jarEntries.nextElement(); - String name = jarEntry.getName(); - File f = new File(workingDirectory, name); - if (jarEntry.isDirectory()) { + Path workingDirectoryPath = workingDirectory.toPath().normalize(); + try (ZipFile zipFile = new ZipFile(nar)) { + Enumeration zipEntries = zipFile.entries(); + while (zipEntries.hasMoreElements()) { + ZipEntry zipEntry = zipEntries.nextElement(); + String name = zipEntry.getName(); + Path targetFilePath = workingDirectoryPath.resolve(name).normalize(); + if (!targetFilePath.startsWith(workingDirectoryPath)) { + log.error("Invalid zip file with entry '{}'", name); + throw new IOException("Invalid zip file. Aborting unpacking."); + } + File f = targetFilePath.toFile(); + if (zipEntry.isDirectory()) { FileUtils.ensureDirectoryExistAndCanReadAndWrite(f); } else { // The directory entry might appear after the file entry FileUtils.ensureDirectoryExistAndCanReadAndWrite(f.getParentFile()); - makeFile(jarFile.getInputStream(jarEntry), f); + makeFile(zipFile.getInputStream(zipEntry), f); } } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java index f9148ba8699fd..51e181811c228 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; @@ -86,6 +87,7 @@ public class OffloadPoliciesImpl implements 
Serializable, OffloadPolicies { public static final Long DEFAULT_OFFLOAD_THRESHOLD_IN_BYTES = null; public static final Long DEFAULT_OFFLOAD_THRESHOLD_IN_SECONDS = null; public static final Long DEFAULT_OFFLOAD_DELETION_LAG_IN_MILLIS = null; + public static final String EXTRA_CONFIG_PREFIX = "managedLedgerOffloadExtraConfig"; public static final String OFFLOAD_THRESHOLD_NAME_IN_CONF_FILE = "managedLedgerOffloadAutoTriggerSizeThresholdBytes"; @@ -121,8 +123,7 @@ public class OffloadPoliciesImpl implements Serializable, OffloadPolicies { private OffloadedReadPriority managedLedgerOffloadedReadPriority = DEFAULT_OFFLOADED_READ_PRIORITY; @Configuration @JsonProperty(access = JsonProperty.Access.READ_WRITE) - private Map managedLedgerExtraConfigurations = null; - + private Map managedLedgerExtraConfigurations = new HashMap<>(); // s3 config, set by service configuration or cli @Configuration @JsonProperty(access = JsonProperty.Access.READ_WRITE) @@ -248,8 +249,7 @@ public static OffloadPoliciesImpl create(String driver, String region, String bu public static OffloadPoliciesImpl create(Properties properties) { OffloadPoliciesImpl data = new OffloadPoliciesImpl(); - Field[] fields = OffloadPoliciesImpl.class.getDeclaredFields(); - Arrays.stream(fields).forEach(f -> { + for (Field f : CONFIGURATION_FIELDS) { if (properties.containsKey(f.getName())) { try { f.setAccessible(true); @@ -260,14 +260,15 @@ public static OffloadPoliciesImpl create(Properties properties) { f.getName(), properties.get(f.getName())), e); } } - }); + } + Map extraConfigurations = properties.entrySet().stream() - .filter(entry -> entry.getKey().toString().startsWith("managedLedgerOffloadExtraConfig")) - .collect(Collectors.toMap( - entry -> entry.getKey().toString().replaceFirst("managedLedgerOffloadExtraConfig", ""), - entry -> entry.getValue().toString())); + .filter(entry -> entry.getKey().toString().startsWith(EXTRA_CONFIG_PREFIX)) + .collect(Collectors.toMap( + entry -> 
entry.getKey().toString().replaceFirst(EXTRA_CONFIG_PREFIX, ""), + entry -> entry.getValue().toString())); - data.setManagedLedgerExtraConfigurations(extraConfigurations); + data.getManagedLedgerExtraConfigurations().putAll(extraConfigurations); data.compatibleWithBrokerConfigFile(properties); return data; @@ -346,66 +347,21 @@ public boolean bucketValid() { public Properties toProperties() { Properties properties = new Properties(); - setProperty(properties, "managedLedgerOffloadedReadPriority", this.getManagedLedgerOffloadedReadPriority()); - setProperty(properties, "offloadersDirectory", this.getOffloadersDirectory()); - setProperty(properties, "managedLedgerOffloadDriver", this.getManagedLedgerOffloadDriver()); - setProperty(properties, "managedLedgerOffloadMaxThreads", - this.getManagedLedgerOffloadMaxThreads()); - setProperty(properties, "managedLedgerOffloadPrefetchRounds", - this.getManagedLedgerOffloadPrefetchRounds()); - setProperty(properties, "managedLedgerOffloadThresholdInBytes", - this.getManagedLedgerOffloadThresholdInBytes()); - setProperty(properties, "managedLedgerOffloadThresholdInSeconds", - this.getManagedLedgerOffloadThresholdInSeconds()); - setProperty(properties, "managedLedgerOffloadDeletionLagInMillis", - this.getManagedLedgerOffloadDeletionLagInMillis()); - setProperty(properties, "managedLedgerOffloadExtraConfigurations", - this.getManagedLedgerExtraConfigurations()); - - if (this.isS3Driver()) { - setProperty(properties, "s3ManagedLedgerOffloadRegion", - this.getS3ManagedLedgerOffloadRegion()); - setProperty(properties, "s3ManagedLedgerOffloadBucket", - this.getS3ManagedLedgerOffloadBucket()); - setProperty(properties, "s3ManagedLedgerOffloadServiceEndpoint", - this.getS3ManagedLedgerOffloadServiceEndpoint()); - setProperty(properties, "s3ManagedLedgerOffloadMaxBlockSizeInBytes", - this.getS3ManagedLedgerOffloadMaxBlockSizeInBytes()); - setProperty(properties, "s3ManagedLedgerOffloadCredentialId", - 
this.getS3ManagedLedgerOffloadCredentialId()); - setProperty(properties, "s3ManagedLedgerOffloadCredentialSecret", - this.getS3ManagedLedgerOffloadCredentialSecret()); - setProperty(properties, "s3ManagedLedgerOffloadRole", - this.getS3ManagedLedgerOffloadRole()); - setProperty(properties, "s3ManagedLedgerOffloadRoleSessionName", - this.getS3ManagedLedgerOffloadRoleSessionName()); - setProperty(properties, "s3ManagedLedgerOffloadReadBufferSizeInBytes", - this.getS3ManagedLedgerOffloadReadBufferSizeInBytes()); - } else if (this.isGcsDriver()) { - setProperty(properties, "gcsManagedLedgerOffloadRegion", - this.getGcsManagedLedgerOffloadRegion()); - setProperty(properties, "gcsManagedLedgerOffloadBucket", - this.getGcsManagedLedgerOffloadBucket()); - setProperty(properties, "gcsManagedLedgerOffloadMaxBlockSizeInBytes", - this.getGcsManagedLedgerOffloadMaxBlockSizeInBytes()); - setProperty(properties, "gcsManagedLedgerOffloadReadBufferSizeInBytes", - this.getGcsManagedLedgerOffloadReadBufferSizeInBytes()); - setProperty(properties, "gcsManagedLedgerOffloadServiceAccountKeyFile", - this.getGcsManagedLedgerOffloadServiceAccountKeyFile()); - } else if (this.isFileSystemDriver()) { - setProperty(properties, "fileSystemProfilePath", this.getFileSystemProfilePath()); - setProperty(properties, "fileSystemURI", this.getFileSystemURI()); - } - - setProperty(properties, "managedLedgerOffloadBucket", this.getManagedLedgerOffloadBucket()); - setProperty(properties, "managedLedgerOffloadRegion", this.getManagedLedgerOffloadRegion()); - setProperty(properties, "managedLedgerOffloadServiceEndpoint", - this.getManagedLedgerOffloadServiceEndpoint()); - setProperty(properties, "managedLedgerOffloadMaxBlockSizeInBytes", - this.getManagedLedgerOffloadMaxBlockSizeInBytes()); - setProperty(properties, "managedLedgerOffloadReadBufferSizeInBytes", - this.getManagedLedgerOffloadReadBufferSizeInBytes()); - + for (Field f : CONFIGURATION_FIELDS) { + try { + f.setAccessible(true); + if 
("managedLedgerExtraConfigurations".equals(f.getName())) { + Map extraConfig = (Map) f.get(this); + extraConfig.forEach((key, value) -> { + setProperty(properties, EXTRA_CONFIG_PREFIX + key, value); + }); + } else { + setProperty(properties, f.getName(), f.get(this)); + } + } catch (Exception e) { + throw new IllegalArgumentException("An error occurred while processing the field: " + f.getName(), e); + } + } return properties; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java index cf0cd820a6d10..3982900041813 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java @@ -1555,6 +1555,9 @@ public static BaseCommand newWatchTopicList( return cmd; } + /*** + * @param topics topic names which are matching, the topic name contains the partition suffix. + */ public static BaseCommand newWatchTopicListSuccess(long requestId, long watcherId, String topicsHash, List topics) { BaseCommand cmd = localCmd(Type.WATCH_TOPIC_LIST_SUCCESS); @@ -1570,6 +1573,10 @@ public static BaseCommand newWatchTopicListSuccess(long requestId, long watcherI return cmd; } + /** + * @param deletedTopics topic names deleted(contains the partition suffix). + * @param newTopics topics names added(contains the partition suffix). 
+ */ public static BaseCommand newWatchTopicUpdate(long watcherId, List newTopics, List deletedTopics, String topicsHash) { BaseCommand cmd = localCmd(Type.WATCH_TOPIC_UPDATE); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java index 2f0a7884dde35..b4e15f8cd1d75 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/OptionalProxyProtocolDecoder.java @@ -19,36 +19,63 @@ package org.apache.pulsar.common.protocol; import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.codec.ProtocolDetectionResult; import io.netty.handler.codec.ProtocolDetectionState; import io.netty.handler.codec.haproxy.HAProxyMessageDecoder; import io.netty.handler.codec.haproxy.HAProxyProtocolVersion; +import lombok.extern.slf4j.Slf4j; /** * Decoder that added whether a new connection is prefixed with the ProxyProtocol. * More about the ProxyProtocol see: http://www.haproxy.org/download/1.8/doc/proxy-protocol.txt. */ +@Slf4j public class OptionalProxyProtocolDecoder extends ChannelInboundHandlerAdapter { public static final String NAME = "optional-proxy-protocol-decoder"; + public static final int MIN_BYTES_SIZE_TO_DETECT_PROTOCOL = 12; + + private CompositeByteBuf cumulatedByteBuf; + @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof ByteBuf) { - ProtocolDetectionResult result = - HAProxyMessageDecoder.detectProtocol((ByteBuf) msg); - // should accumulate data if need more data to detect the protocol + // Combine cumulated buffers. 
+ ByteBuf buf = (ByteBuf) msg; + if (cumulatedByteBuf != null) { + buf = cumulatedByteBuf.addComponent(true, buf); + } + + ProtocolDetectionResult result = HAProxyMessageDecoder.detectProtocol(buf); if (result.state() == ProtocolDetectionState.NEEDS_MORE_DATA) { + // Accumulate data if need more data to detect the protocol. + if (cumulatedByteBuf == null) { + cumulatedByteBuf = new CompositeByteBuf(ctx.alloc(), false, MIN_BYTES_SIZE_TO_DETECT_PROTOCOL, buf); + } return; } + cumulatedByteBuf = null; if (result.state() == ProtocolDetectionState.DETECTED) { ctx.pipeline().addAfter(NAME, null, new HAProxyMessageDecoder()); - ctx.pipeline().remove(this); } + ctx.pipeline().remove(this); + super.channelRead(ctx, buf); + } else { + super.channelRead(ctx, msg); + } + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + super.channelInactive(ctx); + if (cumulatedByteBuf != null) { + log.info("Release cumulated byte buffer when channel inactive."); + cumulatedByteBuf = null; } - super.channelRead(ctx, msg); } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java b/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java index 250cea217ee5f..4c0a8d500b703 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/topics/TopicList.java @@ -47,13 +47,16 @@ public static List filterTopics(List original, String regex) { } public static List filterTopics(List original, Pattern topicsPattern) { - final Pattern shortenedTopicsPattern = topicsPattern.toString().contains(SCHEME_SEPARATOR) - ? 
Pattern.compile(SCHEME_SEPARATOR_PATTERN.split(topicsPattern.toString())[1]) : topicsPattern; + final Pattern shortenedTopicsPattern = Pattern.compile(removeTopicDomainScheme(topicsPattern.toString())); return original.stream() .map(TopicName::get) + .filter(topicName -> { + String partitionedTopicName = topicName.getPartitionedTopicName(); + String removedScheme = SCHEME_SEPARATOR_PATTERN.split(partitionedTopicName)[1]; + return shortenedTopicsPattern.matcher(removedScheme).matches(); + }) .map(TopicName::toString) - .filter(topic -> shortenedTopicsPattern.matcher(SCHEME_SEPARATOR_PATTERN.split(topic)[1]).matches()) .collect(Collectors.toList()); } @@ -78,4 +81,16 @@ public static Set minus(Collection list1, Collection lis s1.removeAll(list2); return s1; } + + private static String removeTopicDomainScheme(String originalRegexp) { + if (!originalRegexp.toString().contains(SCHEME_SEPARATOR)) { + return originalRegexp; + } + String removedTopicDomain = SCHEME_SEPARATOR_PATTERN.split(originalRegexp.toString())[1]; + if (originalRegexp.contains("^")) { + return String.format("^%s", removedTopicDomain); + } else { + return removedTopicDomain; + } + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java index 2b082b4a7899b..6f62589853593 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.common.util; +import com.google.common.util.concurrent.MoreExecutors; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; @@ -55,6 +56,11 @@ public static CompletableFuture waitForAll(Collection runWithCurrentThread(Runnable runnable) { + return CompletableFuture.runAsync( + () -> runnable.run(), MoreExecutors.directExecutor()); + } + public static CompletableFuture> waitForAll(Stream>> 
futures) { return futures.reduce(CompletableFuture.completedFuture(new ArrayList<>()), (pre, curr) -> pre.thenCompose(preV -> curr.thenApply(currV -> { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java index 4ecb29b2462cc..17199664f12d1 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java @@ -28,6 +28,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import lombok.Builder; +import lombok.extern.slf4j.Slf4j; /** * A Rate Limiter that distributes permits at a configurable rate. Each {@link #acquire()} blocks if necessary until a @@ -50,6 +51,7 @@ *

  • Faster: RateLimiter is light-weight and faster than Guava-RateLimiter
  • * */ +@Slf4j public class RateLimiter implements AutoCloseable{ private final ScheduledExecutorService executorService; private long rateTime; @@ -175,7 +177,10 @@ public synchronized boolean tryAcquire() { * @return {@code true} if the permits were acquired, {@code false} otherwise */ public synchronized boolean tryAcquire(long acquirePermit) { - checkArgument(!isClosed(), "Rate limiter is already shutdown"); + if (isClosed()) { + log.info("The current rate limiter is already shutdown, acquire permits directly."); + return true; + } // lazy init and start task only once application start using it if (renewTask == null) { renewTask = createTask(); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java index 12ce7eb74c72b..b801d5f2b05a1 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/BitSetRecyclable.java @@ -216,6 +216,14 @@ public static BitSetRecyclable valueOf(byte[] bytes) { return BitSetRecyclable.valueOf(ByteBuffer.wrap(bytes)); } + /** + * Copy a BitSetRecyclable. + */ + public static BitSetRecyclable valueOf(BitSetRecyclable src) { + // The internal implementation will do the array-copy. + return valueOf(src.words); + } + /** * Returns a new bit set containing all the bits in the given byte * buffer between its position and limit. 
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java index 31b4cb7cbf152..f5b47e7f1ab7a 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java @@ -306,16 +306,17 @@ private static final class Section extends StampedLock { } V get(long key, int keyHash) { - int bucket = keyHash; - long stamp = tryOptimisticRead(); boolean acquiredLock = false; + // add local variable here, so OutOfBound won't happen + long[] keys = this.keys; + V[] values = this.values; + // calculate table.length as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, values.length); + try { while (true) { - int capacity = this.capacity; - bucket = signSafeMod(bucket, capacity); - // First try optimistic locking long storedKey = keys[bucket]; V storedValue = values[bucket]; @@ -333,16 +334,15 @@ V get(long key, int keyHash) { if (!acquiredLock) { stamp = readLock(); acquiredLock = true; + + // update local variable + keys = this.keys; + values = this.values; + bucket = signSafeMod(keyHash, values.length); storedKey = keys[bucket]; storedValue = values[bucket]; } - if (capacity != this.capacity) { - // There has been a rehashing. We need to restart the search - bucket = keyHash; - continue; - } - if (storedKey == key) { return storedValue != DeletedValue ? 
storedValue : null; } else if (storedValue == EmptyValue) { @@ -350,8 +350,7 @@ V get(long key, int keyHash) { return null; } } - - ++bucket; + bucket = (bucket + 1) & (values.length - 1); } } finally { if (acquiredLock) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java index c0ccad9b73d5b..c3babbb8d1103 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java @@ -284,6 +284,9 @@ public Map asMap() { // A section is a portion of the hash map that is covered by a single @SuppressWarnings("serial") private static final class Section extends StampedLock { + // Each item take up 4 continuous array space. + private static final int ITEM_SIZE = 4; + // Keys and values are stored interleaved in the table array private volatile long[] table; @@ -306,7 +309,7 @@ private static final class Section extends StampedLock { float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); this.initCapacity = this.capacity; - this.table = new long[4 * this.capacity]; + this.table = new long[ITEM_SIZE * this.capacity]; this.size = 0; this.usedBuckets = 0; this.autoShrink = autoShrink; @@ -322,7 +325,10 @@ private static final class Section extends StampedLock { LongPair get(long key1, long key2, int keyHash) { long stamp = tryOptimisticRead(); boolean acquiredLock = false; - int bucket = signSafeMod(keyHash, capacity); + // add local variable here, so OutOfBound won't happen + long[] table = this.table; + // calculate table.length / 4 as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); try { while (true) { @@ -345,8 +351,9 @@ LongPair get(long key1, long key2, 
int keyHash) { if (!acquiredLock) { stamp = readLock(); acquiredLock = true; - - bucket = signSafeMod(keyHash, capacity); + // update local variable + table = this.table; + bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); storedKey1 = table[bucket]; storedKey2 = table[bucket + 1]; storedValue1 = table[bucket + 2]; @@ -361,7 +368,7 @@ LongPair get(long key1, long key2, int keyHash) { } } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (acquiredLock) { @@ -413,7 +420,7 @@ boolean put(long key1, long key2, long value1, long value2, int keyHash, boolean } } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (usedBuckets > resizeThresholdUp) { @@ -454,7 +461,7 @@ private boolean remove(long key1, long key2, long value1, long value2, int keyHa return false; } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { @@ -480,7 +487,7 @@ private boolean remove(long key1, long key2, long value1, long value2, int keyHa } private void cleanBucket(int bucket) { - int nextInArray = (bucket + 4) & (table.length - 1); + int nextInArray = (bucket + ITEM_SIZE) & (table.length - 1); if (table[nextInArray] == EmptyKey) { table[bucket] = EmptyKey; table[bucket + 1] = EmptyKey; @@ -489,7 +496,7 @@ private void cleanBucket(int bucket) { --usedBuckets; // Cleanup all the buckets that were in `DeletedKey` state, so that we can reduce unnecessary expansions - bucket = (bucket - 4) & (table.length - 1); + bucket = (bucket - ITEM_SIZE) & (table.length - 1); while (table[bucket] == DeletedKey) { table[bucket] = EmptyKey; table[bucket + 1] = EmptyKey; @@ -497,7 +504,7 @@ private void cleanBucket(int bucket) { table[bucket + 3] = ValueNotFound; --usedBuckets; - bucket = (bucket - 4) & (table.length - 1); + bucket = (bucket - ITEM_SIZE) & (table.length - 1); } } else { table[bucket] = DeletedKey; @@ 
-540,7 +547,7 @@ public void forEach(BiConsumerLongPair processor) { } // Go through all the buckets for this section - for (int bucket = 0; bucket < table.length; bucket += 4) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { long storedKey1 = table[bucket]; long storedKey2 = table[bucket + 1]; long storedValue1 = table[bucket + 2]; @@ -569,11 +576,11 @@ public void forEach(BiConsumerLongPair processor) { } private void rehash(int newCapacity) { - long[] newTable = new long[4 * newCapacity]; + long[] newTable = new long[ITEM_SIZE * newCapacity]; Arrays.fill(newTable, EmptyKey); // Re-hash table - for (int i = 0; i < table.length; i += 4) { + for (int i = 0; i < table.length; i += ITEM_SIZE) { long storedKey1 = table[i]; long storedKey2 = table[i + 1]; long storedValue1 = table[i + 2]; @@ -593,7 +600,7 @@ private void rehash(int newCapacity) { } private void shrinkToInitCapacity() { - long[] newTable = new long[4 * initCapacity]; + long[] newTable = new long[ITEM_SIZE * initCapacity]; Arrays.fill(newTable, EmptyKey); table = newTable; @@ -622,7 +629,7 @@ private static void insertKeyValueNoLock(long[] table, int capacity, long key1, return; } - bucket = (bucket + 4) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } } @@ -641,6 +648,8 @@ static final long hash(long key1, long key2) { } static final int signSafeMod(long n, int max) { + // as the ITEM_SIZE of Section is 4, so the index is the multiple of 4 + // that is to left shift 2 bits return (int) (n & (max - 1)) << 2; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java index 2a1090503857c..f0f04c4edf904 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java +++ 
b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java @@ -294,6 +294,9 @@ public Set items(int numberOfItems, LongPairFunction longPairConverter // A section is a portion of the hash map that is covered by a single @SuppressWarnings("serial") private static final class Section extends StampedLock { + // Each item take up 2 continuous array space. + private static final int ITEM_SIZE = 2; + // Keys and values are stored interleaved in the table array private volatile long[] table; @@ -315,7 +318,7 @@ private static final class Section extends StampedLock { float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); this.initCapacity = this.capacity; - this.table = new long[2 * this.capacity]; + this.table = new long[ITEM_SIZE * this.capacity]; this.size = 0; this.usedBuckets = 0; this.autoShrink = autoShrink; @@ -331,7 +334,11 @@ private static final class Section extends StampedLock { boolean contains(long item1, long item2, int hash) { long stamp = tryOptimisticRead(); boolean acquiredLock = false; - int bucket = signSafeMod(hash, capacity); + + // add local variable here, so OutOfBound won't happen + long[] table = this.table; + // calculate table.length / 2 as capacity to avoid rehash changing capacity + int bucket = signSafeMod(hash, table.length / ITEM_SIZE); try { while (true) { @@ -353,7 +360,9 @@ boolean contains(long item1, long item2, int hash) { stamp = readLock(); acquiredLock = true; - bucket = signSafeMod(hash, capacity); + // update local variable + table = this.table; + bucket = signSafeMod(hash, table.length / ITEM_SIZE); storedItem1 = table[bucket]; storedItem2 = table[bucket + 1]; } @@ -366,7 +375,7 @@ boolean contains(long item1, long item2, int hash) { } } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (acquiredLock) { @@ -410,7 +419,7 @@ boolean add(long item1, long item2, long hash) { } } - bucket = 
(bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (usedBuckets > resizeThresholdUp) { @@ -445,7 +454,7 @@ private boolean remove(long item1, long item2, int hash) { return false; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { tryShrinkThenUnlock(stamp); @@ -459,7 +468,7 @@ private int removeIf(LongPairPredicate filter) { // Go through all the buckets for this section long stamp = writeLock(); try { - for (int bucket = 0; bucket < table.length; bucket += 2) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { long storedItem1 = table[bucket]; long storedItem2 = table[bucket + 1]; if (storedItem1 != DeletedItem && storedItem1 != EmptyItem) { @@ -498,7 +507,7 @@ private void tryShrinkThenUnlock(long stamp) { } private void cleanBucket(int bucket) { - int nextInArray = (bucket + 2) & (table.length - 1); + int nextInArray = (bucket + ITEM_SIZE) & (table.length - 1); if (table[nextInArray] == EmptyItem) { table[bucket] = EmptyItem; table[bucket + 1] = EmptyItem; @@ -506,13 +515,13 @@ private void cleanBucket(int bucket) { // Cleanup all the buckets that were in `DeletedItem` state, // so that we can reduce unnecessary expansions - int lastBucket = (bucket - 2) & (table.length - 1); + int lastBucket = (bucket - ITEM_SIZE) & (table.length - 1); while (table[lastBucket] == DeletedItem) { table[lastBucket] = EmptyItem; table[lastBucket + 1] = EmptyItem; --usedBuckets; - lastBucket = (lastBucket - 2) & (table.length - 1); + lastBucket = (lastBucket - ITEM_SIZE) & (table.length - 1); } } else { table[bucket] = DeletedItem; @@ -542,7 +551,7 @@ public void forEach(LongPairConsumer processor) { // Go through all the buckets for this section. We try to renew the stamp only after a validation // error, otherwise we keep going with the same. 
long stamp = 0; - for (int bucket = 0; bucket < table.length; bucket += 2) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { if (stamp == 0) { stamp = tryOptimisticRead(); } @@ -572,11 +581,11 @@ public void forEach(LongPairConsumer processor) { private void rehash(int newCapacity) { // Expand the hashmap - long[] newTable = new long[2 * newCapacity]; + long[] newTable = new long[ITEM_SIZE * newCapacity]; Arrays.fill(newTable, EmptyItem); // Re-hash table - for (int i = 0; i < table.length; i += 2) { + for (int i = 0; i < table.length; i += ITEM_SIZE) { long storedItem1 = table[i]; long storedItem2 = table[i + 1]; if (storedItem1 != EmptyItem && storedItem1 != DeletedItem) { @@ -595,7 +604,7 @@ private void rehash(int newCapacity) { private void shrinkToInitCapacity() { // Expand the hashmap - long[] newTable = new long[2 * initCapacity]; + long[] newTable = new long[ITEM_SIZE * initCapacity]; Arrays.fill(newTable, EmptyItem); table = newTable; @@ -621,7 +630,7 @@ private static void insertKeyValueNoLock(long[] table, int capacity, long item1, return; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } } @@ -640,6 +649,8 @@ static final long hash(long key1, long key2) { } static final int signSafeMod(long n, int max) { + // as the ITEM_SIZE of Section is 2, so the index is the multiple of 2 + // that is to left shift 1 bit return (int) (n & (max - 1)) << 1; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java index ea2e01768ac7e..1aa95d3090eb2 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java @@ -296,6 +296,9 @@ public List values() { // A section is a portion of the hash map 
that is covered by a single @SuppressWarnings("serial") private static final class Section extends StampedLock { + // Each item take up 2 continuous array space. + private static final int ITEM_SIZE = 2; + // Keys and values are stored interleaved in the table array private volatile Object[] table; @@ -317,7 +320,7 @@ private static final class Section extends StampedLock { float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); this.initCapacity = this.capacity; - this.table = new Object[2 * this.capacity]; + this.table = new Object[ITEM_SIZE * this.capacity]; this.size = 0; this.usedBuckets = 0; this.autoShrink = autoShrink; @@ -332,7 +335,11 @@ private static final class Section extends StampedLock { V get(K key, int keyHash) { long stamp = tryOptimisticRead(); boolean acquiredLock = false; - int bucket = signSafeMod(keyHash, capacity); + + // add local variable here, so OutOfBound won't happen + Object[] table = this.table; + // calculate table.length / 2 as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); try { while (true) { @@ -354,7 +361,9 @@ V get(K key, int keyHash) { stamp = readLock(); acquiredLock = true; - bucket = signSafeMod(keyHash, capacity); + // update local variable + table = this.table; + bucket = signSafeMod(keyHash, table.length / ITEM_SIZE); storedKey = (K) table[bucket]; storedValue = (V) table[bucket + 1]; } @@ -367,7 +376,7 @@ V get(K key, int keyHash) { } } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (acquiredLock) { @@ -420,7 +429,7 @@ V put(K key, V value, int keyHash, boolean onlyIfAbsent, Function valuePro } } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { if (usedBuckets > resizeThresholdUp) { @@ -449,7 +458,7 @@ private V remove(K key, Object value, int keyHash) { if (value == null || 
value.equals(storedValue)) { SIZE_UPDATER.decrementAndGet(this); - int nextInArray = (bucket + 2) & (table.length - 1); + int nextInArray = (bucket + ITEM_SIZE) & (table.length - 1); if (table[nextInArray] == EmptyKey) { table[bucket] = EmptyKey; table[bucket + 1] = null; @@ -457,13 +466,13 @@ private V remove(K key, Object value, int keyHash) { // Cleanup all the buckets that were in `DeletedKey` state, // so that we can reduce unnecessary expansions - int lastBucket = (bucket - 2) & (table.length - 1); + int lastBucket = (bucket - ITEM_SIZE) & (table.length - 1); while (table[lastBucket] == DeletedKey) { table[lastBucket] = EmptyKey; table[lastBucket + 1] = null; --usedBuckets; - lastBucket = (lastBucket - 2) & (table.length - 1); + lastBucket = (lastBucket - ITEM_SIZE) & (table.length - 1); } } else { table[bucket] = DeletedKey; @@ -479,7 +488,7 @@ private V remove(K key, Object value, int keyHash) { return null; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } finally { @@ -528,7 +537,7 @@ public void forEach(BiConsumer processor) { // Go through all the buckets for this section. We try to renew the stamp only after a validation // error, otherwise we keep going with the same. 
long stamp = 0; - for (int bucket = 0; bucket < table.length; bucket += 2) { + for (int bucket = 0; bucket < table.length; bucket += ITEM_SIZE) { if (stamp == 0) { stamp = tryOptimisticRead(); } @@ -558,10 +567,10 @@ public void forEach(BiConsumer processor) { private void rehash(int newCapacity) { // Expand the hashmap - Object[] newTable = new Object[2 * newCapacity]; + Object[] newTable = new Object[ITEM_SIZE * newCapacity]; // Re-hash table - for (int i = 0; i < table.length; i += 2) { + for (int i = 0; i < table.length; i += ITEM_SIZE) { K storedKey = (K) table[i]; V storedValue = (V) table[i + 1]; if (storedKey != EmptyKey && storedKey != DeletedKey) { @@ -577,7 +586,7 @@ private void rehash(int newCapacity) { } private void shrinkToInitCapacity() { - Object[] newTable = new Object[2 * initCapacity]; + Object[] newTable = new Object[ITEM_SIZE * initCapacity]; table = newTable; size = 0; @@ -602,7 +611,7 @@ private static void insertKeyValueNoLock(Object[] table, int capacity, K return; } - bucket = (bucket + 2) & (table.length - 1); + bucket = (bucket + ITEM_SIZE) & (table.length - 1); } } } @@ -618,6 +627,8 @@ static final long hash(K key) { } static final int signSafeMod(long n, int max) { + // as the ITEM_SIZE of Section is 2, so the index is the multiple of 2 + // that is to left shift 1 bit return (int) (n & (max - 1)) << 1; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java index cc8bc07b43095..162cbed5a5ddc 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java @@ -294,16 +294,16 @@ private static final class Section extends StampedLock { } boolean contains(V value, int keyHash) { - int bucket = keyHash; - long stamp = tryOptimisticRead(); 
boolean acquiredLock = false; + // add local variable here, so OutOfBound won't happen + V[] values = this.values; + // calculate table.length as capacity to avoid rehash changing capacity + int bucket = signSafeMod(keyHash, values.length); + try { while (true) { - int capacity = this.capacity; - bucket = signSafeMod(bucket, capacity); - // First try optimistic locking V storedValue = values[bucket]; @@ -321,15 +321,12 @@ boolean contains(V value, int keyHash) { stamp = readLock(); acquiredLock = true; + // update local variable + values = this.values; + bucket = signSafeMod(keyHash, values.length); storedValue = values[bucket]; } - if (capacity != this.capacity) { - // There has been a rehashing. We need to restart the search - bucket = keyHash; - continue; - } - if (value.equals(storedValue)) { return true; } else if (storedValue == EmptyValue) { @@ -337,8 +334,7 @@ boolean contains(V value, int keyHash) { return false; } } - - ++bucket; + bucket = (bucket + 1) & (values.length - 1); } } finally { if (acquiredLock) { diff --git a/pulsar-common/src/main/resources/findbugsExclude.xml b/pulsar-common/src/main/resources/findbugsExclude.xml index df161c4b621a7..8216948c12408 100644 --- a/pulsar-common/src/main/resources/findbugsExclude.xml +++ b/pulsar-common/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java index c6c5ee180f69a..f93afac2ff93b 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/nar/NarUnpackerTest.java @@ -46,7 +46,7 @@ public class NarUnpackerTest { public void createSampleZipFile() throws IOException { sampleZipFile = Files.createTempFile("sample", 
".zip").toFile(); try (ZipOutputStream out = new ZipOutputStream(new FileOutputStream(sampleZipFile))) { - for (int i = 0; i < 10000; i++) { + for (int i = 0; i < 5000; i++) { ZipEntry e = new ZipEntry("hello" + i + ".txt"); out.putNextEntry(e); byte[] msg = "hello world!".getBytes(StandardCharsets.UTF_8); @@ -58,12 +58,20 @@ public void createSampleZipFile() throws IOException { } @AfterMethod(alwaysRun = true) - void deleteSampleZipFile() throws IOException { - if (sampleZipFile != null) { - sampleZipFile.delete(); + void deleteSampleZipFile() { + if (sampleZipFile != null && sampleZipFile.exists()) { + try { + sampleZipFile.delete(); + } catch (Exception e) { + log.warn("Failed to delete file {}", sampleZipFile, e); + } } - if (extractDirectory != null) { - FileUtils.deleteFile(extractDirectory, true); + if (extractDirectory != null && extractDirectory.exists()) { + try { + FileUtils.deleteFile(extractDirectory, true); + } catch (IOException e) { + log.warn("Failed to delete directory {}", extractDirectory, e); + } } } @@ -111,7 +119,7 @@ public static void main(String[] args) { @Test void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { - int processes = 10; + int processes = 5; String javaExePath = findJavaExe().getAbsolutePath(); CountDownLatch countDownLatch = new CountDownLatch(processes); AtomicInteger exceptionCounter = new AtomicInteger(); @@ -122,7 +130,9 @@ void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { // fork a new process with the same classpath Process process = new ProcessBuilder() .command(javaExePath, - "-Xmx64m", + "-Xmx96m", + "-XX:TieredStopAtLevel=1", + "-Dlog4j2.disable.jmx=true", "-cp", System.getProperty("java.class.path"), // use NarUnpackerWorker as the main class @@ -130,6 +140,7 @@ void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { // pass arguments to use for testing sampleZipFile.getAbsolutePath(), extractDirectory.getAbsolutePath()) + 
.redirectErrorStream(true) .start(); String output = IOUtils.toString(process.getInputStream(), StandardCharsets.UTF_8); int retval = process.waitFor(); @@ -147,7 +158,7 @@ void shouldExtractFilesOnceInDifferentProcess() throws InterruptedException { } }).start(); } - assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS), "All processes should finish before timeout"); assertEquals(exceptionCounter.get(), 0); assertEquals(extractCounter.get(), 1); } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java index d79d2c32ffa7f..bbede4e982044 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/OffloadPoliciesTest.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.common.policies.data; +import static org.apache.pulsar.common.policies.data.OffloadPoliciesImpl.EXTRA_CONFIG_PREFIX; +import static org.testng.Assert.assertEquals; import java.io.DataInputStream; import java.io.File; import java.io.IOException; @@ -26,6 +28,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.HashMap; import java.util.Map; import java.util.Properties; import org.testng.Assert; @@ -436,8 +439,8 @@ private byte[] loadClassData(String name) throws IOException { @Test public void testCreateOffloadPoliciesWithExtraConfiguration() { Properties properties = new Properties(); - properties.put("managedLedgerOffloadExtraConfigKey1", "value1"); - properties.put("managedLedgerOffloadExtraConfigKey2", "value2"); + properties.put(EXTRA_CONFIG_PREFIX + "Key1", "value1"); + properties.put(EXTRA_CONFIG_PREFIX + "Key2", "value2"); OffloadPoliciesImpl policies = OffloadPoliciesImpl.create(properties); Map extraConfigurations = 
policies.getManagedLedgerExtraConfigurations(); @@ -445,4 +448,28 @@ public void testCreateOffloadPoliciesWithExtraConfiguration() { Assert.assertEquals(extraConfigurations.get("Key1"), "value1"); Assert.assertEquals(extraConfigurations.get("Key2"), "value2"); } + + /** + * Test toProperties as well as create from properties. + * @throws Exception + */ + @Test + public void testToProperties() throws Exception { + // Base information convert. + OffloadPoliciesImpl offloadPolicies = OffloadPoliciesImpl.create("aws-s3", "test-region", "test-bucket", + "http://test.endpoint", null, null, null, null, 32 * 1024 * 1024, 5 * 1024 * 1024, + 10 * 1024 * 1024L, 100L, 10000L, OffloadedReadPriority.TIERED_STORAGE_FIRST); + assertEquals(offloadPolicies, OffloadPoliciesImpl.create(offloadPolicies.toProperties())); + + // Set useless config to offload policies. Make sure convert conversion result is the same. + offloadPolicies.setFileSystemProfilePath("/test/file"); + assertEquals(offloadPolicies, OffloadPoliciesImpl.create(offloadPolicies.toProperties())); + + // Set extra config to offload policies. Make sure convert conversion result is the same. 
+ Map extraConfiguration = new HashMap<>(); + extraConfiguration.put("key1", "value1"); + extraConfiguration.put("key2", "value2"); + offloadPolicies.setManagedLedgerExtraConfigurations(extraConfiguration); + assertEquals(offloadPolicies, OffloadPoliciesImpl.create(offloadPolicies.toProperties())); + } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java index 6df4494edf886..7d44c187d7355 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java @@ -181,7 +181,6 @@ public void testWaitForAny() { } } - @Test public void testSequencer() { int concurrentNum = 1000; final ScheduledExecutorService executor = Executors.newScheduledThreadPool(concurrentNum); diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java index a317fa63c0986..e1f947ad8c4f6 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java @@ -38,6 +38,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongFunction; import lombok.Cleanup; @@ -213,6 +214,66 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + 
assertEquals(map.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + try { + map.get(1); + } catch (Exception e) { + ex.set(e); + } + }); + } + + assertNull(map.put(1,"v1")); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertNull(map.put(2, "v2")); + assertNull(map.put(3, "v3")); + assertEquals(map.capacity(), 8); + + // shrink hashmap + assertTrue(map.remove(2, "v2")); + assertTrue(map.remove(3, "v3")); + assertEquals(map.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() @@ -361,7 +422,6 @@ public void concurrentInsertionsAndReads() throws Throwable { assertEquals(map.size(), N * nThreads); } - @Test public void stressConcurrentInsertionsAndReads() throws Throwable { ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() .expectedItems(4) diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java index 8e74d285ffb9b..0de3fdb5c84bf 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java @@ -31,9 
+31,12 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; + import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair; import org.testng.annotations.Test; @@ -173,6 +176,68 @@ public void testExpandAndShrink() { assertEquals(map.capacity(), 8); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(map.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + map.get(1, 1); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertTrue(map.put(1, 1, 11, 11)); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertTrue(map.put(2, 2, 22, 22)); + assertTrue(map.put(3, 3, 33, 33)); + assertEquals(map.capacity(), 8); + + // shrink hashmap + assertTrue(map.remove(2, 2, 22, 22)); + assertTrue(map.remove(3, 3, 33, 33)); + assertEquals(map.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testExpandShrinkAndClear() { 
ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java index 7e947ae6e6aa3..bce2b8993835f 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java @@ -30,9 +30,11 @@ import java.util.List; import java.util.Random; import java.util.Set; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; import lombok.Cleanup; import org.apache.pulsar.common.util.collections.ConcurrentLongPairSet.LongPair; @@ -211,6 +213,68 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(set.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + set.contains(1, 1); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertTrue(set.add(1, 1)); + future = executor.submit(() -> { + try { + barrier.await(); + 
} catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertTrue(set.add(2, 2)); + assertTrue(set.add(3, 3)); + assertEquals(set.capacity(), 8); + + // shrink hashmap + assertTrue(set.remove(2, 2)); + assertTrue(set.remove(3, 3)); + assertEquals(set.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java index 198a3f4c5c38b..410d490b98faa 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java @@ -32,11 +32,13 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import lombok.Cleanup; @@ -215,6 +217,68 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(map.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new 
CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + map.get("k2"); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertNull(map.put("k1","v1")); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertNull(map.put("k2", "v2")); + assertNull(map.put("k3", "v3")); + assertEquals(map.capacity(), 8); + + // shrink hashmap + assertTrue(map.remove("k2", "v2")); + assertTrue(map.remove("k3", "v3")); + assertEquals(map.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { ConcurrentOpenHashMap map = diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java index 27c18abb8b347..6a40095ab0647 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java @@ -28,9 +28,11 @@ import java.util.Collections; import java.util.List; import java.util.Random; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; import lombok.Cleanup; import org.testng.annotations.Test; @@ -186,6 +188,68 @@ public void testExpandShrinkAndClear() { assertTrue(map.capacity() == initCapacity); } + @Test + public void 
testConcurrentExpandAndShrinkAndGet() throws Throwable { + ConcurrentOpenHashSet set = ConcurrentOpenHashSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertEquals(set.capacity(), 4); + + ExecutorService executor = Executors.newCachedThreadPool(); + final int readThreads = 16; + final int writeThreads = 1; + final int n = 1_000; + CyclicBarrier barrier = new CyclicBarrier(writeThreads + readThreads); + Future future = null; + AtomicReference ex = new AtomicReference<>(); + + for (int i = 0; i < readThreads; i++) { + executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + while (true) { + try { + set.contains("k2"); + } catch (Exception e) { + ex.set(e); + } + } + }); + } + + assertTrue(set.add("k1")); + future = executor.submit(() -> { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + for (int i = 0; i < n; i++) { + // expand hashmap + assertTrue(set.add("k2")); + assertTrue(set.add("k3")); + assertEquals(set.capacity(), 8); + + // shrink hashmap + assertTrue(set.remove("k2")); + assertTrue(set.remove("k3")); + assertEquals(set.capacity(), 4); + } + }); + + future.get(); + assertTrue(ex.get() == null); + // shut down pool + executor.shutdown(); + } + @Test public void testRemove() { ConcurrentOpenHashSet set = diff --git a/pulsar-config-validation/pom.xml b/pulsar-config-validation/pom.xml index 9c7ab1061b70b..2a84bfd830350 100644 --- a/pulsar-config-validation/pom.xml +++ b/pulsar-config-validation/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - org.apache.pulsar - pulsar - 3.1.0-SNAPSHOT - .. 
- - - pulsar-config-validation - Annotation based config validation for Pulsar - - - - org.slf4j - slf4j-api - - - - - - - org.gaul - modernizer-maven-plugin - - true - 17 - - - - modernizer - verify - - modernizer - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - \ No newline at end of file + + 4.0.0 + + io.streamnative + pulsar + 3.1.0-SNAPSHOT + .. + + pulsar-config-validation + Annotation based config validation for Pulsar + + + org.slf4j + slf4j-api + + + + + + org.gaul + modernizer-maven-plugin + + true + 17 + + + + modernizer + verify + + modernizer + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + + diff --git a/pulsar-docs-tools/pom.xml b/pulsar-docs-tools/pom.xml index 9074ad0d6c591..b1e631d994d02 100644 --- a/pulsar-docs-tools/pom.xml +++ b/pulsar-docs-tools/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar - 3.1.0-SNAPSHOT - - - pulsar-docs-tools - Pulsar Documentation Generators - - - - io.swagger - swagger-annotations - - - io.swagger - swagger-core - - - com.beust - jcommander - - - + + 4.0.0 + + io.streamnative + pulsar + 3.1.0-SNAPSHOT + + pulsar-docs-tools + Pulsar Documentation Generators + + + io.swagger + swagger-annotations + + + io.swagger + swagger-core + + + com.beust + jcommander + + diff --git a/pulsar-functions/api-java/pom.xml b/pulsar-functions/api-java/pom.xml index 7735a0b142745..f0bc56e948bc2 100644 --- a/pulsar-functions/api-java/pom.xml +++ b/pulsar-functions/api-java/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT - pulsar-functions-api Pulsar Functions :: API - - org.slf4j slf4j-api - net.jodah typetools @@ -48,16 +44,13 @@ ${project.version} compile - 
${project.groupId} pulsar-client-admin-api ${project.version} compile - - @@ -81,16 +74,15 @@ org.apache.maven.plugins maven-checkstyle-plugin - - checkstyle - verify - - check - - + + checkstyle + verify + + check + + - diff --git a/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml b/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml index 9638cfcca8da9..b74af4c8ceb2a 100644 --- a/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/api-java/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT - pulsar-functions-instance Pulsar Functions :: Instance - - org.apache.logging.log4j @@ -46,55 +42,46 @@ org.apache.logging.log4j log4j-core - ${project.groupId} pulsar-functions-utils ${project.version} - ${project.groupId} pulsar-metadata ${project.version} - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-api ${project.version} - ${project.groupId} pulsar-functions-secrets ${project.version} - - + ${project.groupId} pulsar-client-admin-original ${project.version} - - + ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-client-messagecrypto-bc ${project.parent.version} - org.apache.bookkeeper stream-storage-java-client @@ -109,24 +96,20 @@ - io.grpc grpc-stub - io.grpc grpc-all - io.perfmark perfmark-api runtime - org.apache.bookkeeper bookkeeper-common @@ -141,61 +124,50 @@ - com.yahoo.datasketches sketches-core - com.google.guava guava - com.beust jcommander - net.jodah typetools - io.prometheus simpleclient ${prometheus.version} - io.prometheus simpleclient_hotspot ${prometheus.version} - io.prometheus simpleclient_httpserver ${prometheus.version} - io.prometheus.jmx collector ${prometheus-jmx.version} - org.awaitility awaitility test - - @@ -215,7 +187,6 @@ - @@ -271,5 +242,4 @@ - diff --git 
a/pulsar-functions/instance/src/main/resources/findbugsExclude.xml b/pulsar-functions/instance/src/main/resources/findbugsExclude.xml index 7fe247d2ab20a..4c04f27f66fbe 100644 --- a/pulsar-functions/instance/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/instance/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT - pulsar-functions-api-examples-builtin Pulsar Functions :: API Examples (NAR) - ${project.groupId} @@ -37,14 +35,36 @@ ${project.version} - org.apache.nifi nifi-nar-maven-plugin + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + - diff --git a/pulsar-functions/java-examples/pom.xml b/pulsar-functions/java-examples/pom.xml index b383295423f40..93afe47845908 100644 --- a/pulsar-functions/java-examples/pom.xml +++ b/pulsar-functions/java-examples/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT - pulsar-functions-api-examples Pulsar Functions :: API Examples - org.slf4j slf4j-api - ${project.groupId} pulsar-functions-api @@ -52,7 +49,6 @@ ${project.version} - @@ -87,5 +83,4 @@ - diff --git a/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml b/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/java-examples/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-functions/localrun-shaded/pom.xml b/pulsar-functions/localrun-shaded/pom.xml index 37e9d40249d49..10e3d1f6d6d37 100644 --- a/pulsar-functions/localrun-shaded/pom.xml +++ b/pulsar-functions/localrun-shaded/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-functions - 3.1.0-SNAPSHOT - .. 
- - - pulsar-functions-local-runner - Pulsar Functions :: Local Runner Shaded - - - - ${project.groupId} - pulsar-functions-local-runner-original - ${project.parent.version} - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - unpack - prepare-package - - unpack - - - - - org.asynchttpclient - async-http-client - ${asynchttpclient.version} - jar - true - org/asynchttpclient/config/ahc-default.properties - ${project.build.directory}/classes - - - - - - - - - maven-antrun-plugin - - - shade-ahc-properties - prepare-package - - run - - - - - - - - - - - - - org.apache.maven.plugins - maven-shade-plugin - - - ${shadePluginPhase} - - shade - - - false - - - - - - - org.apache.pulsar:* - org.apache.bookkeeper:* - commons-*:* - org.apache.commons:* - com.fasterxml.jackson.*:* - io.netty:* - com.google.*:* - javax.servlet:* - org.reactivestreams:reactive-streams - org.apache.commons:* - io.swagger:* - org.yaml:snakeyaml - io.perfmark:* - io.prometheus:* - io.prometheus.jmx:* - javax.ws.rs:* - org.tukaani:xz - com.github.zafarkhaja:java-semver - net.java.dev.jna:* - org.apache.zookeeper:* - com.thoughtworks.paranamer:paranamer - jline:* - org.rocksdb:* - org.eclipse.jetty*:* - org.apache.avro:avro - com.beust:* - net.jodah:* - io.airlift:* - com.yahoo.datasketches:* - io.netty.resolver:* - - - - - org.apache.pulsar:pulsar-client-original - - ** - - - - org/bouncycastle/** - - - - - - + + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + ${shadePluginPhase} + + shade + + + false + + + + + + + io.streamnative:* + org.apache.bookkeeper:* + commons-*:* + org.apache.commons:* + com.fasterxml.jackson.*:* + io.netty:* + com.google.*:* + javax.servlet:* + org.reactivestreams:reactive-streams + org.apache.commons:* + io.swagger:* + org.yaml:snakeyaml + io.perfmark:* + io.prometheus:* + io.prometheus.jmx:* + javax.ws.rs:* + org.tukaani:xz + com.github.zafarkhaja:java-semver + net.java.dev.jna:* + org.apache.zookeeper:* + 
com.thoughtworks.paranamer:paranamer + jline:* + org.rocksdb:* + org.eclipse.jetty*:* + org.apache.avro:avro + com.beust:* + net.jodah:* + io.airlift:* + com.yahoo.datasketches:* + io.netty.resolver:* + + + + + io.streamnative:pulsar-client-original + + ** + + + + org/bouncycastle/** + + + + + + - - com.google - org.apache.pulsar.functions.runtime.shaded.com.google - - - org.apache.jute - org.apache.pulsar.functions.runtime.shaded.org.apache.jute - - - javax.servlet - org.apache.pulsar.functions.runtime.shaded.javax.servlet - - - net.jodah - org.apache.pulsar.functions.runtime.shaded.net.jodah - - - org.lz4 - org.apache.pulsar.functions.runtime.shaded.org.lz4 - - - org.reactivestreams - org.apache.pulsar.functions.runtime.shaded.org.reactivestreams - - - org.apache.commons - org.apache.pulsar.functions.runtime.shaded.org.apache.commons - - - io.swagger - org.apache.pulsar.functions.runtime.shaded.io.swagger - - - org.yaml - org.apache.pulsar.functions.runtime.shaded.org.yaml - - - org.jctools - org.apache.pulsar.functions.runtime.shaded.org.jctools - - - com.squareup.okhttp - org.apache.pulsar.functions.runtime.shaded.com.squareup.okhttp - - - io.grpc - org.apache.pulsar.functions.runtime.shaded.io.grpc - - - io.perfmark - org.apache.pulsar.functions.runtime.shaded.io.perfmark - - - org.joda - org.apache.pulsar.functions.runtime.shaded.org.joda - - - javax.ws.rs - org.apache.pulsar.functions.runtime.shaded.javax.ws.rs - - - io.kubernetes - org.apache.pulsar.functions.runtime.shaded.io.kubernetes - - - io.opencensus - org.apache.pulsar.functions.runtime.shaded.io.opencensus - - - net.jpountz - org.apache.pulsar.functions.runtime.shaded.net.jpountz - - - commons-configuration - org.apache.pulsar.functions.runtime.shaded.commons-configuration - - - org.tukaani - org.apache.pulsar.functions.runtime.shaded.org.tukaani - - - com.github - org.apache.pulsar.functions.runtime.shaded.com.github - - - commons-io - org.apache.pulsar.functions.runtime.shaded.commons-io - - - 
org.apache.distributedlog - org.apache.pulsar.functions.runtime.shaded.org.apache.distributedlog - - - - com.fasterxml - org.apache.pulsar.functions.runtime.shaded.com.fasterxml - - - org.inferred - org.apache.pulsar.functions.runtime.shaded.org.inferred - - - org.apache.bookkeeper - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - - - org.bookkeeper - org.apache.pulsar.functions.runtime.shaded.org.bookkeeper - - - dlshade - org.apache.pulsar.functions.runtime.shaded.dlshade - - - - org.codehaus.jackson - org.apache.pulsar.functions.runtime.shaded.org.codehaus.jackson - - - net.java.dev.jna - org.apache.pulsar.functions.runtime.shaded.net.java.dev.jna - - - org.apache.curator - org.apache.pulsar.functions.runtime.shaded.org.apache.curator - - - javax.validation - org.apache.pulsar.functions.runtime.shaded.javax.validation - - - javax.activation - org.apache.pulsar.functions.runtime.shaded.javax.activation - - - io.prometheus - org.apache.pulsar.functions.runtime.shaded.io.prometheus - - - io.prometheus.jmx - org.apache.pulsar.functions.runtime.shaded.io.prometheus.jmx - - - org.apache.zookeeper - org.apache.pulsar.functions.runtime.shaded.org.apache.zookeeper - - - io.jsonwebtoken - org.apache.pulsar.functions.runtime.shaded.io.jsonwebtoken - - - commons-codec - org.apache.pulsar.functions.runtime.shaded.commons-codec - - - com.thoughtworks.paranamer - org.apache.pulsar.functions.runtime.shaded.com.thoughtworks.paranamer - - - org.codehaus.mojo - org.apache.pulsar.functions.runtime.shaded.org.codehaus.mojo - - - com.github.luben - org.apache.pulsar.functions.runtime.shaded.com.github.luben - - - jline - org.apache.pulsar.functions.runtime.shaded.jline - - - org.xerial.snappy - org.apache.pulsar.functions.runtime.shaded.org.xerial.snappy - - - javax.annotation - org.apache.pulsar.functions.runtime.shaded.javax.annotation - - - org.checkerframework - org.apache.pulsar.functions.runtime.shaded.org.checkerframework - - - org.apache.yetus - 
org.apache.pulsar.functions.runtime.shaded.org.apache.yetus - - - commons-cli - org.apache.pulsar.functions.runtime.shaded.commons-cli - - - commons-lang - org.apache.pulsar.functions.runtime.shaded.commons-lang - - - com.squareup.okio - org.apache.pulsar.functions.runtime.shaded.com.squareup.okio - - - org.rocksdb - org.apache.pulsar.functions.runtime.shaded.org.rocksdb - - - org.objenesis - org.apache.pulsar.functions.runtime.shaded.org.objenesis - - - org.eclipse.jetty - org.apache.pulsar.functions.runtime.shaded.org.eclipse.jetty - - - org.apache.avro - org.apache.pulsar.functions.runtime.shaded.org.apache.avro - - - avro.shaded - org.apache.pulsar.functions.runtime.shaded.avo.shaded - - - com.yahoo.datasketches - org.apache.pulsar.shaded.com.yahoo.datasketches - - - com.yahoo.sketches - org.apache.pulsar.shaded.com.yahoo.sketches - - - com.beust - org.apache.pulsar.functions.runtime.shaded.com.beust - - - - io.netty - org.apache.pulsar.functions.runtime.shaded.io.netty - - - org.hamcrest - org.apache.pulsar.functions.runtime.shaded.org.hamcrest - - - aj.org - org.apache.pulsar.functions.runtime.shaded.aj.org - - - com.scurrilous - org.apache.pulsar.functions.runtime.shaded.com.scurrilous - - - okio - org.apache.pulsar.functions.runtime.shaded.okio - - + + com.fasterxml + org.apache.pulsar.functions.runtime.shaded.com.fasterxml + + + org.inferred + org.apache.pulsar.functions.runtime.shaded.org.inferred + + + org.apache.bookkeeper + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + + + org.bookkeeper + org.apache.pulsar.functions.runtime.shaded.org.bookkeeper + + + dlshade + org.apache.pulsar.functions.runtime.shaded.dlshade + + + + org.codehaus.jackson + org.apache.pulsar.functions.runtime.shaded.org.codehaus.jackson + + + net.java.dev.jna + org.apache.pulsar.functions.runtime.shaded.net.java.dev.jna + + + org.apache.curator + org.apache.pulsar.functions.runtime.shaded.org.apache.curator + + + javax.validation + 
org.apache.pulsar.functions.runtime.shaded.javax.validation + + + javax.activation + org.apache.pulsar.functions.runtime.shaded.javax.activation + + + io.prometheus + org.apache.pulsar.functions.runtime.shaded.io.prometheus + + + io.prometheus.jmx + org.apache.pulsar.functions.runtime.shaded.io.prometheus.jmx + + + org.apache.zookeeper + org.apache.pulsar.functions.runtime.shaded.org.apache.zookeeper + + + io.jsonwebtoken + org.apache.pulsar.functions.runtime.shaded.io.jsonwebtoken + + + commons-codec + org.apache.pulsar.functions.runtime.shaded.commons-codec + + + com.thoughtworks.paranamer + org.apache.pulsar.functions.runtime.shaded.com.thoughtworks.paranamer + + + org.codehaus.mojo + org.apache.pulsar.functions.runtime.shaded.org.codehaus.mojo + + + com.github.luben + org.apache.pulsar.functions.runtime.shaded.com.github.luben + + + jline + org.apache.pulsar.functions.runtime.shaded.jline + + + org.xerial.snappy + org.apache.pulsar.functions.runtime.shaded.org.xerial.snappy + + + javax.annotation + org.apache.pulsar.functions.runtime.shaded.javax.annotation + + + org.checkerframework + org.apache.pulsar.functions.runtime.shaded.org.checkerframework + + + org.apache.yetus + org.apache.pulsar.functions.runtime.shaded.org.apache.yetus + + + commons-cli + org.apache.pulsar.functions.runtime.shaded.commons-cli + + + commons-lang + org.apache.pulsar.functions.runtime.shaded.commons-lang + + + com.squareup.okio + org.apache.pulsar.functions.runtime.shaded.com.squareup.okio + + + org.rocksdb + org.apache.pulsar.functions.runtime.shaded.org.rocksdb + + + org.objenesis + org.apache.pulsar.functions.runtime.shaded.org.objenesis + + + org.eclipse.jetty + org.apache.pulsar.functions.runtime.shaded.org.eclipse.jetty + + + org.apache.avro + org.apache.pulsar.functions.runtime.shaded.org.apache.avro + + + avro.shaded + org.apache.pulsar.functions.runtime.shaded.avo.shaded + + + com.yahoo.datasketches + org.apache.pulsar.shaded.com.yahoo.datasketches + + + com.yahoo.sketches + 
org.apache.pulsar.shaded.com.yahoo.sketches + + + com.beust + org.apache.pulsar.functions.runtime.shaded.com.beust + + + + io.netty + org.apache.pulsar.functions.runtime.shaded.io.netty + + + org.hamcrest + org.apache.pulsar.functions.runtime.shaded.org.hamcrest + + + aj.org + org.apache.pulsar.functions.runtime.shaded.aj.org + + + com.scurrilous + org.apache.pulsar.functions.runtime.shaded.com.scurrilous + + + okio + org.apache.pulsar.functions.runtime.shaded.okio + + - - org.asynchttpclient - org.apache.pulsar.functions.runtime.shaded.org.asynchttpclient - - - io.airlift - org.apache.pulsar.functions.runtime.shaded.io.airlift - - - - - - - - - + + + + +
    + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + +
    +
    diff --git a/pulsar-functions/localrun/pom.xml b/pulsar-functions/localrun/pom.xml index 5779695eae8e4..ac0364bf97d91 100644 --- a/pulsar-functions/localrun/pom.xml +++ b/pulsar-functions/localrun/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-functions - 3.1.0-SNAPSHOT - .. - - - pulsar-functions-local-runner-original - Pulsar Functions :: Local Runner original - - - - ${project.groupId} - pulsar-functions-runtime - ${project.parent.version} - - - - - org.apache.logging.log4j - log4j-slf4j-impl - - - org.apache.logging.log4j - log4j-api - - - org.apache.logging.log4j - log4j-core - - - - io.grpc - grpc-all - - - - io.perfmark - perfmark-api - runtime - - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - check - verify - - check - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - + + 4.0.0 + + io.streamnative + pulsar-functions + 3.1.0-SNAPSHOT + .. 
+ + pulsar-functions-local-runner-original + Pulsar Functions :: Local Runner original + + + ${project.groupId} + pulsar-functions-runtime + ${project.parent.version} + + + + org.apache.logging.log4j + log4j-slf4j-impl + + + org.apache.logging.log4j + log4j-api + + + org.apache.logging.log4j + log4j-core + + + io.grpc + grpc-all + + + io.perfmark + perfmark-api + runtime + + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + check + verify + + check + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + diff --git a/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java b/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java index ed9b0af3b43d8..711fa33edb2a2 100644 --- a/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java +++ b/pulsar-functions/localrun/src/main/java/org/apache/pulsar/functions/LocalRunner.java @@ -52,7 +52,9 @@ import lombok.Value; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.functions.Utils; +import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.common.io.SourceConfig; import org.apache.pulsar.common.nar.FileUtils; @@ -75,8 +77,11 @@ import org.apache.pulsar.functions.secretsproviderconfigurator.SecretsProviderConfigurator; import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.FunctionConfigUtils; +import org.apache.pulsar.functions.utils.FunctionRuntimeCommon; +import org.apache.pulsar.functions.utils.LoadedFunctionPackage; import org.apache.pulsar.functions.utils.SinkConfigUtils; import org.apache.pulsar.functions.utils.SourceConfigUtils; 
+import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.functioncache.FunctionCacheEntry; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.utils.functions.FunctionUtils; @@ -357,9 +362,12 @@ public void start(boolean blocking) throws Exception { userCodeFile = functionConfig.getJar(); userCodeClassLoader = extractClassLoader( userCodeFile, ComponentType.FUNCTION, functionConfig.getClassName()); + ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), + FunctionDefinition.class); functionDetails = FunctionConfigUtils.convert( functionConfig, - FunctionConfigUtils.validateJavaFunction(functionConfig, getCurrentOrUserCodeClassLoader())); + FunctionConfigUtils.validateJavaFunction(functionConfig, validatableFunctionPackage)); } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.GO) { userCodeFile = functionConfig.getGo(); } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.PYTHON) { @@ -369,7 +377,10 @@ public void start(boolean blocking) throws Exception { } if (functionDetails == null) { - functionDetails = FunctionConfigUtils.convert(functionConfig, getCurrentOrUserCodeClassLoader()); + ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), + FunctionDefinition.class); + functionDetails = FunctionConfigUtils.convert(functionConfig, validatableFunctionPackage); } } else if (sourceConfig != null) { inferMissingArguments(sourceConfig); @@ -377,9 +388,10 @@ public void start(boolean blocking) throws Exception { parallelism = sourceConfig.getParallelism(); userCodeClassLoader = extractClassLoader( userCodeFile, ComponentType.SOURCE, sourceConfig.getClassName()); - functionDetails = SourceConfigUtils.convert( - sourceConfig, - SourceConfigUtils.validateAndExtractDetails(sourceConfig, 
getCurrentOrUserCodeClassLoader(), true)); + ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), ConnectorDefinition.class); + functionDetails = SourceConfigUtils.convert(sourceConfig, + SourceConfigUtils.validateAndExtractDetails(sourceConfig, validatableFunctionPackage, true)); } else if (sinkConfig != null) { inferMissingArguments(sinkConfig); userCodeFile = sinkConfig.getArchive(); @@ -387,6 +399,8 @@ public void start(boolean blocking) throws Exception { parallelism = sinkConfig.getParallelism(); userCodeClassLoader = extractClassLoader( userCodeFile, ComponentType.SINK, sinkConfig.getClassName()); + ValidatableFunctionPackage validatableFunctionPackage = + new LoadedFunctionPackage(getCurrentOrUserCodeClassLoader(), ConnectorDefinition.class); if (isNotEmpty(sinkConfig.getTransformFunction())) { transformFunctionCodeClassLoader = extractClassLoader( sinkConfig.getTransformFunction(), @@ -395,16 +409,19 @@ public void start(boolean blocking) throws Exception { } ClassLoader functionClassLoader = null; + ValidatableFunctionPackage validatableTransformFunction = null; if (transformFunctionCodeClassLoader != null) { functionClassLoader = transformFunctionCodeClassLoader.getClassLoader() == null ? 
Thread.currentThread().getContextClassLoader() : transformFunctionCodeClassLoader.getClassLoader(); + validatableTransformFunction = + new LoadedFunctionPackage(functionClassLoader, FunctionDefinition.class); } functionDetails = SinkConfigUtils.convert( sinkConfig, - SinkConfigUtils.validateAndExtractDetails(sinkConfig, getCurrentOrUserCodeClassLoader(), - functionClassLoader, true)); + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunctionPackage, + validatableTransformFunction, true)); } else { throw new IllegalArgumentException("Must specify Function, Source or Sink config"); } @@ -472,7 +489,7 @@ private UserCodeClassLoader extractClassLoader(String userCodeFile, ComponentTyp if (classLoader == null) { if (userCodeFile != null && Utils.isFunctionPackageUrlSupported(userCodeFile)) { File file = FunctionCommon.extractFileFromPkgURL(userCodeFile); - classLoader = FunctionCommon.getClassLoaderFromPackage( + classLoader = FunctionRuntimeCommon.getClassLoaderFromPackage( componentType, className, file, narExtractionDirectory); classLoaderCreated = true; } else if (userCodeFile != null) { @@ -494,7 +511,7 @@ private UserCodeClassLoader extractClassLoader(String userCodeFile, ComponentTyp } throw new RuntimeException(errorMsg + " (" + userCodeFile + ") does not exist"); } - classLoader = FunctionCommon.getClassLoaderFromPackage( + classLoader = FunctionRuntimeCommon.getClassLoaderFromPackage( componentType, className, file, narExtractionDirectory); classLoaderCreated = true; } else { @@ -713,7 +730,7 @@ private ClassLoader isBuiltInFunction(String functionType) throws IOException { FunctionArchive function = functions.get(functionName); if (function != null && function.getFunctionDefinition().getFunctionClass() != null) { // Function type is a valid built-in type. 
- return function.getClassLoader(); + return function.getFunctionPackage().getClassLoader(); } else { return null; } @@ -727,7 +744,7 @@ private ClassLoader isBuiltInSource(String sourceType) throws IOException { Connector connector = connectors.get(source); if (connector != null && connector.getConnectorDefinition().getSourceClass() != null) { // Source type is a valid built-in connector type. - return connector.getClassLoader(); + return connector.getConnectorFunctionPackage().getClassLoader(); } else { return null; } @@ -741,18 +758,18 @@ private ClassLoader isBuiltInSink(String sinkType) throws IOException { Connector connector = connectors.get(sink); if (connector != null && connector.getConnectorDefinition().getSinkClass() != null) { // Sink type is a valid built-in connector type - return connector.getClassLoader(); + return connector.getConnectorFunctionPackage().getClassLoader(); } else { return null; } } private TreeMap getFunctions() throws IOException { - return FunctionUtils.searchForFunctions(functionsDir); + return FunctionUtils.searchForFunctions(functionsDir, narExtractionDirectory, true); } private TreeMap getConnectors() throws IOException { - return ConnectorUtils.searchForConnectors(connectorsDir, narExtractionDirectory); + return ConnectorUtils.searchForConnectors(connectorsDir, narExtractionDirectory, true); } private SecretsProviderConfigurator getSecretsProviderConfigurator() { diff --git a/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml b/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml index ad183eb5d9427..c7c41fe9ea252 100644 --- a/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/localrun/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - \ No newline at end of file + + + + + + + + + + + diff --git a/pulsar-functions/pom.xml b/pulsar-functions/pom.xml index 1d9973a4d41a8..c72e15ac1421f 100644 --- a/pulsar-functions/pom.xml +++ 
b/pulsar-functions/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-functions Pulsar Functions :: Parent - main @@ -41,24 +39,23 @@ !true - - proto - api-java - java-examples - java-examples-builtin - utils - instance - runtime - runtime-all - worker - secrets - localrun - localrun-shaded - + + proto + api-java + java-examples + java-examples-builtin + utils + instance + runtime + runtime-all + worker + secrets + localrun + localrun-shaded + - - - core-modules + + core-modules proto api-java @@ -72,8 +69,6 @@ secrets localrun - - + - diff --git a/pulsar-functions/proto/pom.xml b/pulsar-functions/proto/pom.xml index b84b2d9181a38..c56e4bbf833dd 100644 --- a/pulsar-functions/proto/pom.xml +++ b/pulsar-functions/proto/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-functions - 3.1.0-SNAPSHOT - - - pulsar-functions-proto - Pulsar Functions :: Proto - - - - - com.google.protobuf - protobuf-java - - - - com.google.protobuf - protobuf-java-util - - - - javax.annotation - javax.annotation-api - - - - io.grpc - grpc-all - - - io.netty - * - - - - - - io.perfmark - perfmark-api - runtime - - - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - ${protobuf-maven-plugin.version} - - com.google.protobuf:protoc:${protoc3.version}:exe:${os.detected.classifier} - true - grpc-java - io.grpc:protoc-gen-grpc-java:${protoc-gen-grpc-java.version}:exe:${os.detected.classifier} - - - - generate-sources - - compile - compile-custom - - - - - - - + + 4.0.0 + + io.streamnative + pulsar-functions + 3.1.0-SNAPSHOT + + pulsar-functions-proto + Pulsar Functions :: Proto + + + com.google.protobuf + protobuf-java + + + com.google.protobuf + protobuf-java-util + + + javax.annotation + javax.annotation-api + + + io.grpc + grpc-all + + + io.netty + * + + + + + io.perfmark + perfmark-api + runtime + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf-maven-plugin.version} + + 
com.google.protobuf:protoc:${protoc3.version}:exe:${os.detected.classifier} + true + grpc-java + io.grpc:protoc-gen-grpc-java:${protoc-gen-grpc-java.version}:exe:${os.detected.classifier} + + + + generate-sources + + compile + compile-custom + + + + + + diff --git a/pulsar-functions/runtime-all/pom.xml b/pulsar-functions/runtime-all/pom.xml index 55e4009db077e..eb8ae5646ada7 100644 --- a/pulsar-functions/runtime-all/pom.xml +++ b/pulsar-functions/runtime-all/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT .. - - pulsar-functions-runtime-all Pulsar Functions :: Runtime All - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-api ${project.version} - ${project.groupId} pulsar-client-api ${project.version} - org.apache.avro avro ${avro.version} - com.fasterxml.jackson.core jackson-databind - com.google.protobuf @@ -95,30 +85,24 @@ gson ${gson.version} - - org.slf4j slf4j-api - org.apache.logging.log4j log4j-slf4j-impl - org.apache.logging.log4j log4j-api - org.apache.logging.log4j log4j-core - - - - - - - - - - - \ No newline at end of file + + + + + + + + + + diff --git a/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml b/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml index 190d9be92940b..84e6ce44f51e4 100644 --- a/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml +++ b/pulsar-functions/runtime-all/src/main/resources/java_instance_log4j2.xml @@ -1,3 +1,4 @@ + - pulsar-functions-instance - 30 - - - pulsar.log.appender - RollingFile - - - pulsar.log.level - info - - - bk.log.level - info - - - - - Console - SYSTEM_OUT - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - RollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz - true - - 
%d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? - - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}*log.gz - - - 30d - - - - - - BkRollingFile - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk - ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz - true - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - 1 - true - - - 1 GB - - - 0 0 0 * * ? - - - - - ${sys:pulsar.function.log.dir} - 2 - - */${sys:pulsar.function.log.file}.bk*log.gz - - - 30d - - - - - - - - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - ${sys:bk.log.level} - false - - BkRollingFile - - - - info - - ${sys:pulsar.log.appender} - ${sys:pulsar.log.level} - - - - \ No newline at end of file + pulsar-functions-instance + 30 + + + pulsar.log.appender + RollingFile + + + pulsar.log.level + info + + + bk.log.level + info + + + + + Console + SYSTEM_OUT + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + RollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.log + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? + + + + + ${sys:pulsar.function.log.dir} + 2 + + */${sys:pulsar.function.log.file}*log.gz + + + 30d + + + + + + BkRollingFile + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk + ${sys:pulsar.function.log.dir}/${sys:pulsar.function.log.file}.bk-%d{MM-dd-yyyy}-%i.log.gz + true + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + 1 + true + + + 1 GB + + + 0 0 0 * * ? 
+ + + + + ${sys:pulsar.function.log.dir} + 2 + + */${sys:pulsar.function.log.file}.bk*log.gz + + + 30d + + + + + + + + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + ${sys:bk.log.level} + false + + BkRollingFile + + + + info + + ${sys:pulsar.log.appender} + ${sys:pulsar.log.level} + + + + diff --git a/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml b/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml index f86d03e41793f..fe478be5b8667 100644 --- a/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml +++ b/pulsar-functions/runtime-all/src/main/resources/kubernetes_instance_log4j2.xml @@ -1,3 +1,4 @@ + - pulsar-functions-kubernetes-instance - 30 - - - pulsar.log.level - info - - - bk.log.level - info - - - - - Console - SYSTEM_OUT - - %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n - - - - - - org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper - ${sys:bk.log.level} - false - - Console - - - - info - - Console - ${sys:pulsar.log.level} - - - - \ No newline at end of file + pulsar-functions-kubernetes-instance + 30 + + + pulsar.log.level + info + + + bk.log.level + info + + + + + Console + SYSTEM_OUT + + %d{ISO8601_OFFSET_DATE_TIME_HHMM} [%t] %-5level %logger{36} - %msg%n + + + + + + org.apache.pulsar.functions.runtime.shaded.org.apache.bookkeeper + ${sys:bk.log.level} + false + + Console + + + + info + + Console + ${sys:pulsar.log.level} + + + + diff --git a/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java b/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java index 854c146893243..b65bf17f70b44 100644 --- a/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java +++ b/pulsar-functions/runtime-all/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceDepsTest.java 
@@ -46,6 +46,8 @@ * 8. Apache AVRO * 9. Jackson Mapper and Databind (dependency of AVRO) * 10. Apache Commons Compress (dependency of AVRO) + * 11. Apache Commons Lang (dependency of Apache Commons Compress) + * 12. Apache Commons IO (dependency of Apache Commons Compress) */ public class JavaInstanceDepsTest { @@ -71,6 +73,8 @@ public void testInstanceJarDeps() throws IOException { && !name.startsWith("org/apache/avro") && !name.startsWith("com/fasterxml/jackson") && !name.startsWith("org/apache/commons/compress") + && !name.startsWith("org/apache/commons/lang3") + && !name.startsWith("org/apache/commons/io") && !name.startsWith("com/google") && !name.startsWith("org/checkerframework") && !name.startsWith("javax/annotation") diff --git a/pulsar-functions/runtime/pom.xml b/pulsar-functions/runtime/pom.xml index 728d3f80e1965..f8d039a3073ac 100644 --- a/pulsar-functions/runtime/pom.xml +++ b/pulsar-functions/runtime/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT - pulsar-functions-runtime Pulsar Functions :: Runtime - - ${project.groupId} pulsar-functions-instance ${project.version} - ${project.groupId} pulsar-functions-secrets ${project.version} - com.beust jcommander - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.core jackson-databind - io.kubernetes client-java @@ -84,9 +75,7 @@ pulsar-broker-common ${project.version} - - @@ -106,7 +95,6 @@ - org.apache.maven.plugins @@ -154,5 +142,4 @@ - diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java index 89281a2f550e2..e23838cb34396 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java @@ -38,6 +38,9 @@ 
import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.dynamic.ClassFileLocator; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.functions.WindowConfig; import org.apache.pulsar.common.nar.NarClassLoader; @@ -325,7 +328,8 @@ public void close() { } private void inferringMissingTypeClassName(Function.FunctionDetails.Builder functionDetailsBuilder, - ClassLoader classLoader) throws ClassNotFoundException { + ClassLoader classLoader) { + TypePool typePool = TypePool.Default.of(ClassFileLocator.ForClassLoader.of(classLoader)); switch (functionDetailsBuilder.getComponentType()) { case FUNCTION: if ((functionDetailsBuilder.hasSource() @@ -344,14 +348,13 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func WindowConfig.class); className = windowConfig.getActualWindowFunctionClassName(); } - - Class[] typeArgs = FunctionCommon.getFunctionTypes(classLoader.loadClass(className), + TypeDefinition[] typeArgs = FunctionCommon.getFunctionTypes(typePool.describe(className).resolve(), isWindowConfigPresent); if (functionDetailsBuilder.hasSource() && functionDetailsBuilder.getSource().getTypeClassName().isEmpty() && typeArgs[0] != null) { Function.SourceSpec.Builder sourceBuilder = functionDetailsBuilder.getSource().toBuilder(); - sourceBuilder.setTypeClassName(typeArgs[0].getName()); + sourceBuilder.setTypeClassName(typeArgs[0].asErasure().getTypeName()); functionDetailsBuilder.setSource(sourceBuilder.build()); } @@ -359,7 +362,7 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func && functionDetailsBuilder.getSink().getTypeClassName().isEmpty() && typeArgs[1] != null) { Function.SinkSpec.Builder sinkBuilder = functionDetailsBuilder.getSink().toBuilder(); - sinkBuilder.setTypeClassName(typeArgs[1].getName()); + 
sinkBuilder.setTypeClassName(typeArgs[1].asErasure().getTypeName()); functionDetailsBuilder.setSink(sinkBuilder.build()); } } @@ -368,7 +371,8 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func if ((functionDetailsBuilder.hasSink() && functionDetailsBuilder.getSink().getTypeClassName().isEmpty())) { String typeArg = - getSinkType(functionDetailsBuilder.getSink().getClassName(), classLoader).getName(); + getSinkType(functionDetailsBuilder.getSink().getClassName(), typePool).asErasure() + .getTypeName(); Function.SinkSpec.Builder sinkBuilder = Function.SinkSpec.newBuilder(functionDetailsBuilder.getSink()); @@ -387,7 +391,8 @@ private void inferringMissingTypeClassName(Function.FunctionDetails.Builder func if ((functionDetailsBuilder.hasSource() && functionDetailsBuilder.getSource().getTypeClassName().isEmpty())) { String typeArg = - getSourceType(functionDetailsBuilder.getSource().getClassName(), classLoader).getName(); + getSourceType(functionDetailsBuilder.getSource().getClassName(), typePool).asErasure() + .getTypeName(); Function.SourceSpec.Builder sourceBuilder = Function.SourceSpec.newBuilder(functionDetailsBuilder.getSource()); diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java index ed128568bcf50..9dca4015d5ef5 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java @@ -124,17 +124,17 @@ private static ClassLoader getFunctionClassLoader(InstanceConfig instanceConfig, if (componentType == Function.FunctionDetails.ComponentType.FUNCTION && functionsManager.isPresent()) { return functionsManager.get() .getFunction(instanceConfig.getFunctionDetails().getBuiltin()) - .getClassLoader(); + 
.getFunctionPackage().getClassLoader(); } if (componentType == Function.FunctionDetails.ComponentType.SOURCE && connectorsManager.isPresent()) { return connectorsManager.get() .getConnector(instanceConfig.getFunctionDetails().getSource().getBuiltin()) - .getClassLoader(); + .getConnectorFunctionPackage().getClassLoader(); } if (componentType == Function.FunctionDetails.ComponentType.SINK && connectorsManager.isPresent()) { return connectorsManager.get() .getConnector(instanceConfig.getFunctionDetails().getSink().getBuiltin()) - .getClassLoader(); + .getConnectorFunctionPackage().getClassLoader(); } } return loadJars(jarFile, instanceConfig, functionId, instanceConfig.getFunctionDetails().getName(), diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/ConnectorsManager.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/ConnectorsManager.java index e1770b8b64415..19d31d0f63b1d 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/ConnectorsManager.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/ConnectorsManager.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.functions.worker; +import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.nio.file.Path; import java.util.List; @@ -27,18 +28,35 @@ import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.common.io.ConfigFieldDefinition; import org.apache.pulsar.common.io.ConnectorDefinition; +import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory; import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.utils.io.ConnectorUtils; @Slf4j -public class ConnectorsManager { +public class ConnectorsManager implements AutoCloseable { @Getter private volatile TreeMap connectors; + @VisibleForTesting + public ConnectorsManager() { + this.connectors = new TreeMap<>(); + } + public ConnectorsManager(WorkerConfig 
workerConfig) throws IOException { - this.connectors = ConnectorUtils - .searchForConnectors(workerConfig.getConnectorsDirectory(), workerConfig.getNarExtractionDirectory()); + this.connectors = createConnectors(workerConfig); + } + + private static TreeMap createConnectors(WorkerConfig workerConfig) throws IOException { + boolean enableClassloading = workerConfig.getEnableClassloadingOfBuiltinFiles() + || ThreadRuntimeFactory.class.getName().equals(workerConfig.getFunctionRuntimeFactoryClassName()); + return ConnectorUtils.searchForConnectors(workerConfig.getConnectorsDirectory(), + workerConfig.getNarExtractionDirectory(), enableClassloading); + } + + @VisibleForTesting + public void addConnector(String connectorType, Connector connector) { + connectors.put(connectorType, connector); } public Connector getConnector(String connectorType) { @@ -71,7 +89,25 @@ public Path getSinkArchive(String sinkType) { } public void reloadConnectors(WorkerConfig workerConfig) throws IOException { - connectors = ConnectorUtils - .searchForConnectors(workerConfig.getConnectorsDirectory(), workerConfig.getNarExtractionDirectory()); + TreeMap oldConnectors = connectors; + this.connectors = createConnectors(workerConfig); + closeConnectors(oldConnectors); } + + @Override + public void close() { + closeConnectors(connectors); + } + + private void closeConnectors(TreeMap connectorMap) { + connectorMap.values().forEach(connector -> { + try { + connector.close(); + } catch (Exception e) { + log.warn("Failed to close connector", e); + } + }); + connectorMap.clear(); + } + } diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java index 9199d568cad03..5ab7ff7221abb 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java +++ 
b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/FunctionsManager.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.functions.worker; +import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.nio.file.Path; import java.util.List; @@ -25,16 +26,25 @@ import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.common.functions.FunctionDefinition; +import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.utils.functions.FunctionUtils; @Slf4j -public class FunctionsManager { - +public class FunctionsManager implements AutoCloseable { private TreeMap functions; + @VisibleForTesting + public FunctionsManager() { + this.functions = new TreeMap<>(); + } + public FunctionsManager(WorkerConfig workerConfig) throws IOException { - this.functions = FunctionUtils.searchForFunctions(workerConfig.getFunctionsDirectory()); + this.functions = createFunctions(workerConfig); + } + + public void addFunction(String functionType, FunctionArchive functionArchive) { + functions.put(functionType, functionArchive); } public FunctionArchive getFunction(String functionType) { @@ -51,6 +61,32 @@ public List getFunctionDefinitions() { } public void reloadFunctions(WorkerConfig workerConfig) throws IOException { - this.functions = FunctionUtils.searchForFunctions(workerConfig.getFunctionsDirectory()); + TreeMap oldFunctions = functions; + this.functions = createFunctions(workerConfig); + closeFunctions(oldFunctions); + } + + private static TreeMap createFunctions(WorkerConfig workerConfig) throws IOException { + boolean enableClassloading = workerConfig.getEnableClassloadingOfBuiltinFiles() + || ThreadRuntimeFactory.class.getName().equals(workerConfig.getFunctionRuntimeFactoryClassName()); + return FunctionUtils.searchForFunctions(workerConfig.getFunctionsDirectory(), + 
workerConfig.getNarExtractionDirectory(), + enableClassloading); + } + + @Override + public void close() { + closeFunctions(functions); + } + + private void closeFunctions(TreeMap functionMap) { + functionMap.values().forEach(functionArchive -> { + try { + functionArchive.close(); + } catch (Exception e) { + log.warn("Failed to close function archive", e); + } + }); + functionMap.clear(); } } diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java index 0ed73953d7aa7..ec0e620d0ae8b 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/worker/WorkerConfig.java @@ -238,6 +238,22 @@ public class WorkerConfig implements Serializable, PulsarConfiguration { ) private boolean zooKeeperAllowReadOnlyOperations; + @FieldContext( + category = CATEGORY_WORKER, + doc = "Specifies if the function worker should use classloading for validating submissions for built-in " + + "connectors and functions. This is required for validateConnectorConfig to take effect. " + + "Default is false." + ) + private Boolean enableClassloadingOfBuiltinFiles = false; + + @FieldContext( + category = CATEGORY_WORKER, + doc = "Specifies if the function worker should use classloading for validating submissions for external " + + "connectors and functions. This is required for validateConnectorConfig to take effect. " + + "Default is false." 
+ ) + private Boolean enableClassloadingOfExternalFiles = false; + @FieldContext( category = CATEGORY_CONNECTORS, doc = "The path to the location to locate builtin connectors" @@ -250,7 +266,22 @@ public class WorkerConfig implements Serializable, PulsarConfiguration { private String narExtractionDirectory = NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR; @FieldContext( category = CATEGORY_CONNECTORS, - doc = "Should we validate connector config during submission" + doc = "Whether to enable referencing connectors directory files by file url in connector (sink/source) " + + "creation. Default is true." + ) + private Boolean enableReferencingConnectorDirectoryFiles = true; + @FieldContext( + category = CATEGORY_FUNCTIONS, + doc = "Regex patterns for enabling creation of connectors by referencing packages in matching http/https " + + "urls." + ) + private List additionalEnabledConnectorUrlPatterns = new ArrayList<>(); + @FieldContext( + category = CATEGORY_CONNECTORS, + doc = "Enables extended validation for connector config with fine-grain annotation based validation " + + "during submission. Classloading with either enableClassloadingOfExternalFiles or " + + "enableClassloadingOfBuiltinFiles must be enabled on the worker for this to take effect. " + + "Default is false." ) private Boolean validateConnectorConfig = false; @FieldContext( @@ -263,6 +294,18 @@ public class WorkerConfig implements Serializable, PulsarConfiguration { doc = "The path to the location to locate builtin functions" ) private String functionsDirectory = "./functions"; + @FieldContext( + category = CATEGORY_FUNCTIONS, + doc = "Whether to enable referencing functions directory files by file url in functions creation. " + + "Default is true." + ) + private Boolean enableReferencingFunctionsDirectoryFiles = true; + @FieldContext( + category = CATEGORY_FUNCTIONS, + doc = "Regex patterns for enabling creation of functions by referencing packages in matching http/https " + + "urls." 
+ ) + private List additionalEnabledFunctionsUrlPatterns = new ArrayList<>(); @FieldContext( category = CATEGORY_FUNC_METADATA_MNG, doc = "The Pulsar topic used for storing function metadata" diff --git a/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml b/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml index b4a4e5926de3f..976abe7c54bb2 100644 --- a/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/runtime/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - \ No newline at end of file + + + + + + + + + + diff --git a/pulsar-functions/secrets/pom.xml b/pulsar-functions/secrets/pom.xml index 93069bd4a3b64..db7c2d2712e89 100644 --- a/pulsar-functions/secrets/pom.xml +++ b/pulsar-functions/secrets/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT - pulsar-functions-secrets Pulsar Functions :: Secrets - io.kubernetes @@ -50,7 +48,6 @@ - ${project.groupId} pulsar-functions-proto @@ -76,7 +73,6 @@
    - com.github.spotbugs spotbugs-maven-plugin @@ -109,5 +105,4 @@
    -
    diff --git a/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml b/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml index 0cab8ae6a418a..7d4464ae1c08d 100644 --- a/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/secrets/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 3.1.0-SNAPSHOT - pulsar-functions-utils Pulsar Functions :: Utils - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-proto ${project.version} - ${project.groupId} pulsar-functions-api ${project.version} - ${project.groupId} pulsar-config-validation ${project.version} - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.core jackson-annotations - com.fasterxml.jackson.core jackson-databind - com.google.code.gson gson - net.jodah typetools - - ${project.groupId} - pulsar-client-original - ${project.version} + net.bytebuddy + byte-buddy + + + org.zeroturnaround + zt-zip + 1.17 + + + ${project.groupId} + pulsar-client-original + ${project.version} - ${project.groupId} pulsar-package-core ${project.version} - com.github.tomakehurst wiremock-jre8 ${wiremock.version} test - @@ -126,7 +119,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin @@ -142,5 +134,4 @@ - diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java index 7df173da0f195..6a3d2f6ad7ddb 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java @@ -22,16 +22,9 @@ import com.google.protobuf.AbstractMessage.Builder; import com.google.protobuf.MessageOrBuilder; import 
com.google.protobuf.util.JsonFormat; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.io.ObjectOutputStream; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.net.MalformedURLException; import java.net.ServerSocket; import java.net.URISyntaxException; import java.net.URL; @@ -41,10 +34,14 @@ import java.util.Collection; import java.util.Map; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import lombok.AccessLevel; import lombok.NoArgsConstructor; import lombok.extern.slf4j.Slf4j; -import net.jodah.typetools.TypeResolver; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.description.type.TypeList; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.MessageId; @@ -54,16 +51,11 @@ import org.apache.pulsar.client.impl.auth.AuthenticationDataBasic; import org.apache.pulsar.common.functions.FunctionConfig; import org.apache.pulsar.common.functions.Utils; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.functions.api.Function; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.api.WindowFunction; import org.apache.pulsar.functions.proto.Function.FunctionDetails.ComponentType; import org.apache.pulsar.functions.proto.Function.FunctionDetails.Runtime; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; import org.apache.pulsar.io.core.BatchSource; import org.apache.pulsar.io.core.Sink; import 
org.apache.pulsar.io.core.Source; @@ -97,50 +89,74 @@ public static int findAvailablePort() { } } - public static Class[] getFunctionTypes(FunctionConfig functionConfig, ClassLoader classLoader) + public static TypeDefinition[] getFunctionTypes(FunctionConfig functionConfig, TypePool typePool) throws ClassNotFoundException { - return getFunctionTypes(functionConfig, classLoader.loadClass(functionConfig.getClassName())); + return getFunctionTypes(functionConfig, typePool.describe(functionConfig.getClassName()).resolve()); } - public static Class[] getFunctionTypes(FunctionConfig functionConfig, Class functionClass) - throws ClassNotFoundException { + public static TypeDefinition[] getFunctionTypes(FunctionConfig functionConfig, TypeDefinition functionClass) { boolean isWindowConfigPresent = functionConfig.getWindowConfig() != null; return getFunctionTypes(functionClass, isWindowConfigPresent); } - public static Class[] getFunctionTypes(Class userClass, boolean isWindowConfigPresent) { + public static TypeDefinition[] getFunctionTypes(TypeDefinition userClass, boolean isWindowConfigPresent) { Class classParent = getFunctionClassParent(userClass, isWindowConfigPresent); - Class[] typeArgs = TypeResolver.resolveRawArguments(classParent, userClass); + TypeList.Generic typeArgsList = resolveInterfaceTypeArguments(userClass, classParent); + TypeDescription.Generic[] typeArgs = new TypeDescription.Generic[2]; + typeArgs[0] = typeArgsList.get(0); + typeArgs[1] = typeArgsList.get(1); // if window function if (isWindowConfigPresent) { if (classParent.equals(java.util.function.Function.class)) { - if (!typeArgs[0].equals(Collection.class)) { + if (!typeArgs[0].asErasure().isAssignableTo(Collection.class)) { throw new IllegalArgumentException("Window function must take a collection as input"); } - typeArgs[0] = (Class) unwrapType(classParent, userClass, 0); + typeArgs[0] = typeArgs[0].getTypeArguments().get(0); } } - if (typeArgs[1].equals(Record.class)) { - typeArgs[1] = 
(Class) unwrapType(classParent, userClass, 1); + if (typeArgs[1].asErasure().isAssignableTo(Record.class)) { + typeArgs[1] = typeArgs[1].getTypeArguments().get(0); + } + if (typeArgs[1].asErasure().isAssignableTo(CompletableFuture.class)) { + typeArgs[1] = typeArgs[1].getTypeArguments().get(0); } - return typeArgs; } - public static Class[] getRawFunctionTypes(Class userClass, boolean isWindowConfigPresent) { + private static TypeList.Generic resolveInterfaceTypeArguments(TypeDefinition userClass, Class interfaceClass) { + if (!interfaceClass.isInterface()) { + throw new IllegalArgumentException("interfaceClass must be an interface"); + } + for (TypeDescription.Generic interfaze : userClass.getInterfaces()) { + if (interfaze.asErasure().isAssignableTo(interfaceClass)) { + return interfaze.getTypeArguments(); + } + } + if (userClass.getSuperClass() != null) { + return resolveInterfaceTypeArguments(userClass.getSuperClass(), interfaceClass); + } + return null; + } + + public static TypeDescription.Generic[] getRawFunctionTypes(TypeDefinition userClass, + boolean isWindowConfigPresent) { Class classParent = getFunctionClassParent(userClass, isWindowConfigPresent); - return TypeResolver.resolveRawArguments(classParent, userClass); + TypeList.Generic typeArgsList = resolveInterfaceTypeArguments(userClass, classParent); + TypeDescription.Generic[] typeArgs = new TypeDescription.Generic[2]; + typeArgs[0] = typeArgsList.get(0); + typeArgs[1] = typeArgsList.get(1); + return typeArgs; } - public static Class getFunctionClassParent(Class userClass, boolean isWindowConfigPresent) { + public static Class getFunctionClassParent(TypeDefinition userClass, boolean isWindowConfigPresent) { if (isWindowConfigPresent) { - if (WindowFunction.class.isAssignableFrom(userClass)) { + if (userClass.asErasure().isAssignableTo(WindowFunction.class)) { return WindowFunction.class; } else { return java.util.function.Function.class; } } else { - if (Function.class.isAssignableFrom(userClass)) { 
+ if (userClass.asErasure().isAssignableTo(Function.class)) { return Function.class; } else { return java.util.function.Function.class; @@ -148,41 +164,6 @@ public static Class getFunctionClassParent(Class userClass, boolean isWind } } - private static Type unwrapType(Class type, Class subType, int position) { - Type genericType = TypeResolver.resolveGenericType(type, subType); - Type argType = ((ParameterizedType) genericType).getActualTypeArguments()[position]; - return ((ParameterizedType) argType).getActualTypeArguments()[0]; - } - - public static Object createInstance(String userClassName, ClassLoader classLoader) { - Class theCls; - try { - theCls = Class.forName(userClassName); - } catch (ClassNotFoundException | NoClassDefFoundError cnfe) { - try { - theCls = Class.forName(userClassName, true, classLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new RuntimeException("User class must be in class path", cnfe); - } - } - Object result; - try { - Constructor meth = theCls.getDeclaredConstructor(); - meth.setAccessible(true); - result = meth.newInstance(); - } catch (InstantiationException ie) { - throw new RuntimeException("User class must be concrete", ie); - } catch (NoSuchMethodException e) { - throw new RuntimeException("User class doesn't have such method", e); - } catch (IllegalAccessException e) { - throw new RuntimeException("User class must have a no-arg constructor", e); - } catch (InvocationTargetException e) { - throw new RuntimeException("User class constructor throws exception", e); - } - return result; - - } - public static Runtime convertRuntime(FunctionConfig.Runtime runtime) { for (Runtime type : Runtime.values()) { if (type.name().equals(runtime.name())) { @@ -223,29 +204,34 @@ public static FunctionConfig.ProcessingGuarantees convertProcessingGuarantee( throw new RuntimeException("Unrecognized processing guarantee: " + processingGuarantees.name()); } - public static Class getSourceType(String className, 
ClassLoader classLoader) throws ClassNotFoundException { - return getSourceType(classLoader.loadClass(className)); + public static TypeDefinition getSourceType(String className, TypePool typePool) { + return getSourceType(typePool.describe(className).resolve()); } - public static Class getSourceType(Class sourceClass) { - - if (Source.class.isAssignableFrom(sourceClass)) { - return TypeResolver.resolveRawArgument(Source.class, sourceClass); - } else if (BatchSource.class.isAssignableFrom(sourceClass)) { - return TypeResolver.resolveRawArgument(BatchSource.class, sourceClass); + public static TypeDefinition getSourceType(TypeDefinition sourceClass) { + if (sourceClass.asErasure().isAssignableTo(Source.class)) { + return resolveInterfaceTypeArguments(sourceClass, Source.class).get(0); + } else if (sourceClass.asErasure().isAssignableTo(BatchSource.class)) { + return resolveInterfaceTypeArguments(sourceClass, BatchSource.class).get(0); } else { throw new IllegalArgumentException( String.format("Source class %s does not implement the correct interface", - sourceClass.getName())); + sourceClass.getActualName())); } } - public static Class getSinkType(String className, ClassLoader classLoader) throws ClassNotFoundException { - return getSinkType(classLoader.loadClass(className)); + public static TypeDefinition getSinkType(String className, TypePool typePool) { + return getSinkType(typePool.describe(className).resolve()); } - public static Class getSinkType(Class sinkClass) { - return TypeResolver.resolveRawArgument(Sink.class, sinkClass); + public static TypeDefinition getSinkType(TypeDefinition sinkClass) { + if (sinkClass.asErasure().isAssignableTo(Sink.class)) { + return resolveInterfaceTypeArguments(sinkClass, Sink.class).get(0); + } else { + throw new IllegalArgumentException( + String.format("Sink class %s does not implement the correct interface", + sinkClass.getActualName())); + } } public static void downloadFromHttpUrl(String destPkgUrl, File targetFile) throws 
IOException { @@ -264,16 +250,6 @@ public static void downloadFromHttpUrl(String destPkgUrl, File targetFile) throw log.info("Downloading function package from {} to {} completed!", destPkgUrl, targetFile.getAbsoluteFile()); } - public static ClassLoader extractClassLoader(String destPkgUrl) throws IOException, URISyntaxException { - File file = extractFileFromPkgURL(destPkgUrl); - try { - return ClassLoaderUtils.loadJar(file); - } catch (MalformedURLException e) { - throw new IllegalArgumentException( - "Corrupt User PackageFile " + file + " with error " + e.getMessage()); - } - } - public static File createPkgTempFile() throws IOException { return File.createTempFile("functions", ".tmp"); } @@ -297,21 +273,6 @@ public static File extractFileFromPkgURL(String destPkgUrl) throws IOException, } } - public static NarClassLoader extractNarClassLoader(File packageFile, - String narExtractionDirectory) { - if (packageFile != null) { - try { - return NarClassLoaderBuilder.builder() - .narFile(packageFile) - .extractionDirectory(narExtractionDirectory) - .build(); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage()); - } - } - return null; - } - public static String getFullyQualifiedInstanceId(org.apache.pulsar.functions.proto.Function.Instance instance) { return getFullyQualifiedInstanceId( instance.getFunctionMetaData().getFunctionDetails().getTenant(), @@ -345,17 +306,6 @@ public static final MessageId getMessageId(long sequenceId) { return new MessageIdImpl(ledgerId, entryId, -1); } - public static byte[] toByteArray(Object obj) throws IOException { - byte[] bytes = null; - try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ObjectOutputStream oos = new ObjectOutputStream(bos)) { - oos.writeObject(obj); - oos.flush(); - bytes = bos.toByteArray(); - } - return bytes; - } - public static String getUniquePackageName(String packageName) { return String.format("%s-%s", UUID.randomUUID().toString(), packageName); } @@ -403,146 +353,11 
@@ private static String extractFromFullyQualifiedName(String fqfn, int index) { throw new RuntimeException("Invalid Fully Qualified Function Name " + fqfn); } - public static Class getTypeArg(String className, Class funClass, ClassLoader classLoader) - throws ClassNotFoundException { - Class loadedClass = classLoader.loadClass(className); - if (!funClass.isAssignableFrom(loadedClass)) { - throw new IllegalArgumentException( - String.format("class %s is not type of %s", className, funClass.getName())); - } - return TypeResolver.resolveRawArgument(funClass, loadedClass); - } - public static double roundDecimal(double value, int places) { double scale = Math.pow(10, places); return Math.round(value * scale) / scale; } - public static ClassLoader getClassLoaderFromPackage( - ComponentType componentType, - String className, - File packageFile, - String narExtractionDirectory) { - String connectorClassName = className; - ClassLoader jarClassLoader = null; - boolean keepJarClassLoader = false; - ClassLoader narClassLoader = null; - boolean keepNarClassLoader = false; - - Exception jarClassLoaderException = null; - Exception narClassLoaderException = null; - - try { - try { - jarClassLoader = ClassLoaderUtils.extractClassLoader(packageFile); - } catch (Exception e) { - jarClassLoaderException = e; - } - try { - narClassLoader = FunctionCommon.extractNarClassLoader(packageFile, narExtractionDirectory); - } catch (Exception e) { - narClassLoaderException = e; - } - - // if connector class name is not provided, we can only try to load archive as a NAR - if (isEmpty(connectorClassName)) { - if (narClassLoader == null) { - throw new IllegalArgumentException(String.format("%s package does not have the correct format. " - + "Pulsar cannot determine if the package is a NAR package or JAR package. 
" - + "%s classname is not provided and attempts to load it as a NAR package produced " - + "the following error.", - capFirstLetter(componentType), capFirstLetter(componentType)), - narClassLoaderException); - } - try { - if (componentType == ComponentType.FUNCTION) { - connectorClassName = FunctionUtils.getFunctionClass(narClassLoader); - } else if (componentType == ComponentType.SOURCE) { - connectorClassName = ConnectorUtils.getIOSourceClass((NarClassLoader) narClassLoader); - } else { - connectorClassName = ConnectorUtils.getIOSinkClass((NarClassLoader) narClassLoader); - } - } catch (IOException e) { - throw new IllegalArgumentException(String.format("Failed to extract %s class from archive", - componentType.toString().toLowerCase()), e); - } - - try { - narClassLoader.loadClass(connectorClassName); - keepNarClassLoader = true; - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", capFirstLetter(componentType), - connectorClassName), e); - } - - } else { - // if connector class name is provided, we need to try to load it as a JAR and as a NAR. 
- if (jarClassLoader != null) { - try { - jarClassLoader.loadClass(connectorClassName); - keepJarClassLoader = true; - return jarClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e) { - // class not found in JAR try loading as a NAR and searching for the class - if (narClassLoader != null) { - - try { - narClassLoader.loadClass(connectorClassName); - keepNarClassLoader = true; - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e1) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", - capFirstLetter(componentType), connectorClassName), e1); - } - } else { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", capFirstLetter(componentType), - connectorClassName), e); - } - } - } else if (narClassLoader != null) { - try { - narClassLoader.loadClass(connectorClassName); - keepNarClassLoader = true; - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e1) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", - capFirstLetter(componentType), connectorClassName), e1); - } - } else { - StringBuilder errorMsg = new StringBuilder(capFirstLetter(componentType) - + " package does not have the correct format." 
- + " Pulsar cannot determine if the package is a NAR package or JAR package."); - - if (jarClassLoaderException != null) { - errorMsg.append( - " Attempts to load it as a JAR package produced error: " + jarClassLoaderException - .getMessage()); - } - - if (narClassLoaderException != null) { - errorMsg.append( - " Attempts to load it as a NAR package produced error: " + narClassLoaderException - .getMessage()); - } - - throw new IllegalArgumentException(errorMsg.toString()); - } - } - } finally { - if (!keepJarClassLoader) { - ClassLoaderUtils.closeClassLoader(jarClassLoader); - } - if (!keepNarClassLoader) { - ClassLoaderUtils.closeClassLoader(narClassLoader); - } - } - } - public static String capFirstLetter(Enum en) { return StringUtils.capitalize(en.toString().toLowerCase()); } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java index 8e95e0fecf998..ee59317daf755 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java @@ -18,12 +18,11 @@ */ package org.apache.pulsar.functions.utils; -import static org.apache.commons.lang.StringUtils.isBlank; -import static org.apache.commons.lang.StringUtils.isNotBlank; -import static org.apache.commons.lang.StringUtils.isNotEmpty; +import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.isEmpty; +import static org.apache.commons.lang3.StringUtils.isNotBlank; +import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static org.apache.pulsar.common.functions.Utils.BUILTIN; -import static org.apache.pulsar.common.util.ClassLoaderUtils.loadJar; import static org.apache.pulsar.functions.utils.FunctionCommon.convertFromCompressionType; import static 
org.apache.pulsar.functions.utils.FunctionCommon.convertFromFunctionDetailsCompressionType; import static org.apache.pulsar.functions.utils.FunctionCommon.convertFromFunctionDetailsSubscriptionPosition; @@ -32,9 +31,7 @@ import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; import java.io.File; -import java.io.IOException; import java.lang.reflect.Type; -import java.net.MalformedURLException; import java.util.Collection; import java.util.HashMap; import java.util.LinkedList; @@ -44,10 +41,13 @@ import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.StringUtils; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.pool.TypePool; +import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.functions.ProducerConfig; import org.apache.pulsar.common.functions.Resources; import org.apache.pulsar.common.functions.WindowConfig; @@ -55,7 +55,6 @@ import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; @Slf4j public class FunctionConfigUtils { @@ -74,26 +73,21 @@ public static class ExtractedFunctionDetails { private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.create(); - public static FunctionDetails convert(FunctionConfig functionConfig, ClassLoader classLoader) - throws IllegalArgumentException { + public static FunctionDetails convert(FunctionConfig functionConfig) { + return convert(functionConfig, (ValidatableFunctionPackage) null); + } - if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA) { - if 
(classLoader != null) { - try { - Class[] typeArgs = FunctionCommon.getFunctionTypes(functionConfig, classLoader); - return convert( - functionConfig, - new ExtractedFunctionDetails( - functionConfig.getClassName(), - typeArgs[0].getName(), - typeArgs[1].getName())); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionConfig.getClassName()), e); - } - } + public static FunctionDetails convert(FunctionConfig functionConfig, + ValidatableFunctionPackage validatableFunctionPackage) + throws IllegalArgumentException { + if (functionConfig == null) { + throw new IllegalArgumentException("Function config is not provided"); + } + if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA && validatableFunctionPackage != null) { + return convert(functionConfig, doJavaChecks(functionConfig, validatableFunctionPackage)); + } else { + return convert(functionConfig, new ExtractedFunctionDetails(functionConfig.getClassName(), null, null)); } - return convert(functionConfig, new ExtractedFunctionDetails(functionConfig.getClassName(), null, null)); } public static FunctionDetails convert(FunctionConfig functionConfig, ExtractedFunctionDetails extractedDetails) @@ -280,6 +274,12 @@ public static FunctionDetails convert(FunctionConfig functionConfig, ExtractedFu } sinkSpecBuilder.setProducerSpec(pbldr.build()); } + if (functionConfig.getBatchBuilder() != null) { + Function.ProducerSpec.Builder builder = sinkSpecBuilder.getProducerSpec() != null + ? 
sinkSpecBuilder.getProducerSpec().toBuilder() + : Function.ProducerSpec.newBuilder(); + sinkSpecBuilder.setProducerSpec(builder.setBatchBuilder(functionConfig.getBatchBuilder()).build()); + } functionDetailsBuilder.setSink(sinkSpecBuilder); if (functionConfig.getTenant() != null) { @@ -587,48 +587,49 @@ public static void inferMissingArguments(FunctionConfig functionConfig, } } - public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfig, ClassLoader clsLoader) { + public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfig, + ValidatableFunctionPackage validatableFunctionPackage) { - String functionClassName = functionConfig.getClassName(); - Class functionClass; + String functionClassName = StringUtils.trimToNull(functionConfig.getClassName()); + TypeDefinition functionClass; try { // if class name in function config is not set, this should be a built-in function // thus we should try to find its class name in the NAR service definition if (functionClassName == null) { - try { - functionClassName = FunctionUtils.getFunctionClass(clsLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract source class from archive", e); + FunctionDefinition functionDefinition = + validatableFunctionPackage.getFunctionMetaData(FunctionDefinition.class); + if (functionDefinition == null) { + throw new IllegalArgumentException("Function class name is not provided."); + } + functionClassName = functionDefinition.getFunctionClass(); + if (functionClassName == null) { + throw new IllegalArgumentException("Function class name is not provided."); } } - functionClass = clsLoader.loadClass(functionClassName); + functionClass = validatableFunctionPackage.resolveType(functionClassName); - if (!org.apache.pulsar.functions.api.Function.class.isAssignableFrom(functionClass) - && !java.util.function.Function.class.isAssignableFrom(functionClass) - && 
!org.apache.pulsar.functions.api.WindowFunction.class.isAssignableFrom(functionClass)) { + if (!functionClass.asErasure().isAssignableTo(org.apache.pulsar.functions.api.Function.class) + && !functionClass.asErasure().isAssignableTo(java.util.function.Function.class) + && !functionClass.asErasure() + .isAssignableTo(org.apache.pulsar.functions.api.WindowFunction.class)) { throw new IllegalArgumentException( String.format("Function class %s does not implement the correct interface", - functionClass.getName())); + functionClassName)); } - } catch (ClassNotFoundException | NoClassDefFoundError e) { + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionConfig.getClassName()), e); + String.format("Function class %s must be in class path", functionClassName), e); } - Class[] typeArgs; - try { - typeArgs = FunctionCommon.getFunctionTypes(functionConfig, functionClass); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionConfig.getClassName()), e); - } + TypeDefinition[] typeArgs = FunctionCommon.getFunctionTypes(functionConfig, functionClass); // inputs use default schema, so there is no check needed there // Check if the Input serialization/deserialization class exists in jar or already loaded and that it // implements SerDe class if (functionConfig.getCustomSerdeInputs() != null) { functionConfig.getCustomSerdeInputs().forEach((topicName, inputSerializer) -> { - ValidatorUtils.validateSerde(inputSerializer, typeArgs[0], clsLoader, true); + ValidatorUtils.validateSerde(inputSerializer, typeArgs[0], validatableFunctionPackage.getTypePool(), + true); }); } @@ -643,8 +644,8 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi throw new IllegalArgumentException( String.format("Topic %s has an incorrect schema Info", topicName)); 
} - ValidatorUtils.validateSchema(consumerConfig.getSchemaType(), typeArgs[0], clsLoader, true); - + ValidatorUtils.validateSchema(consumerConfig.getSchemaType(), typeArgs[0], + validatableFunctionPackage.getTypePool(), true); }); } @@ -659,13 +660,16 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi "Only one of schemaType or serdeClassName should be set in inputSpec"); } if (!isEmpty(conf.getSerdeClassName())) { - ValidatorUtils.validateSerde(conf.getSerdeClassName(), typeArgs[0], clsLoader, true); + ValidatorUtils.validateSerde(conf.getSerdeClassName(), typeArgs[0], + validatableFunctionPackage.getTypePool(), true); } if (!isEmpty(conf.getSchemaType())) { - ValidatorUtils.validateSchema(conf.getSchemaType(), typeArgs[0], clsLoader, true); + ValidatorUtils.validateSchema(conf.getSchemaType(), typeArgs[0], + validatableFunctionPackage.getTypePool(), true); } if (conf.getCryptoConfig() != null) { - ValidatorUtils.validateCryptoKeyReader(conf.getCryptoConfig(), clsLoader, false); + ValidatorUtils.validateCryptoKeyReader(conf.getCryptoConfig(), + validatableFunctionPackage.getTypePool(), false); } }); } @@ -673,8 +677,8 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi if (Void.class.equals(typeArgs[1])) { return new FunctionConfigUtils.ExtractedFunctionDetails( functionClassName, - typeArgs[0].getName(), - typeArgs[1].getName()); + typeArgs[0].asErasure().getTypeName(), + typeArgs[1].asErasure().getTypeName()); } // One and only one of outputSchemaType and outputSerdeClassName should be set @@ -684,22 +688,25 @@ public static ExtractedFunctionDetails doJavaChecks(FunctionConfig functionConfi } if (!isEmpty(functionConfig.getOutputSchemaType())) { - ValidatorUtils.validateSchema(functionConfig.getOutputSchemaType(), typeArgs[1], clsLoader, false); + ValidatorUtils.validateSchema(functionConfig.getOutputSchemaType(), typeArgs[1], + validatableFunctionPackage.getTypePool(), false); } if 
(!isEmpty(functionConfig.getOutputSerdeClassName())) { - ValidatorUtils.validateSerde(functionConfig.getOutputSerdeClassName(), typeArgs[1], clsLoader, false); + ValidatorUtils.validateSerde(functionConfig.getOutputSerdeClassName(), typeArgs[1], + validatableFunctionPackage.getTypePool(), false); } if (functionConfig.getProducerConfig() != null && functionConfig.getProducerConfig().getCryptoConfig() != null) { ValidatorUtils - .validateCryptoKeyReader(functionConfig.getProducerConfig().getCryptoConfig(), clsLoader, true); + .validateCryptoKeyReader(functionConfig.getProducerConfig().getCryptoConfig(), + validatableFunctionPackage.getTypePool(), true); } return new FunctionConfigUtils.ExtractedFunctionDetails( functionClassName, - typeArgs[0].getName(), - typeArgs[1].getName()); + typeArgs[0].asErasure().getTypeName(), + typeArgs[1].asErasure().getTypeName()); } private static void doPythonChecks(FunctionConfig functionConfig) { @@ -906,47 +913,21 @@ public static Collection collectAllInputTopics(FunctionConfig functionCo return retval; } - public static ClassLoader validate(FunctionConfig functionConfig, File functionPackageFile) { + public static void validateNonJavaFunction(FunctionConfig functionConfig) { doCommonChecks(functionConfig); - if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA) { - ClassLoader classLoader; - if (functionPackageFile != null) { - try { - classLoader = loadJar(functionPackageFile); - } catch (MalformedURLException e) { - throw new IllegalArgumentException("Corrupted Jar File", e); - } - } else if (!isEmpty(functionConfig.getJar())) { - File jarFile = new File(functionConfig.getJar()); - if (!jarFile.exists()) { - throw new IllegalArgumentException("Jar file does not exist"); - } - try { - classLoader = loadJar(jarFile); - } catch (Exception e) { - throw new IllegalArgumentException("Corrupted Jar File", e); - } - } else { - throw new IllegalArgumentException("Function Package is not provided"); - } - - 
doJavaChecks(functionConfig, classLoader); - return classLoader; - } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.GO) { + if (functionConfig.getRuntime() == FunctionConfig.Runtime.GO) { doGolangChecks(functionConfig); - return null; } else if (functionConfig.getRuntime() == FunctionConfig.Runtime.PYTHON) { doPythonChecks(functionConfig); - return null; } else { throw new IllegalArgumentException("Function language runtime is either not set or cannot be determined"); } } public static ExtractedFunctionDetails validateJavaFunction(FunctionConfig functionConfig, - ClassLoader classLoader) { + ValidatableFunctionPackage validatableFunctionPackage) { doCommonChecks(functionConfig); - return doJavaChecks(functionConfig, classLoader); + return doJavaChecks(functionConfig, validatableFunctionPackage); } public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) { @@ -1088,6 +1069,9 @@ public static FunctionConfig validateUpdate(FunctionConfig existingConfig, Funct if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) { mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions()); } + if (newConfig.getProducerConfig() != null) { + mergedConfig.setProducerConfig(newConfig.getProducerConfig()); + } return mergedConfig; } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionFilePackage.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionFilePackage.java new file mode 100644 index 0000000000000..8224de32521fb --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionFilePackage.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.utils; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.MalformedURLException; +import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.List; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.dynamic.ClassFileLocator; +import net.bytebuddy.pool.TypePool; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.nar.NarClassLoaderBuilder; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.zeroturnaround.zip.ZipUtil; + +/** + * FunctionFilePackage is a class that represents a function package and + * implements the ValidatableFunctionPackage interface which decouples the + * function package from classloading. 
+ */ +public class FunctionFilePackage implements AutoCloseable, ValidatableFunctionPackage { + private final File file; + private final ClassFileLocator.Compound classFileLocator; + private final TypePool typePool; + private final boolean isNar; + private final String narExtractionDirectory; + private final boolean enableClassloading; + + private ClassLoader classLoader; + + private final Object configMetadata; + + public FunctionFilePackage(File file, String narExtractionDirectory, boolean enableClassloading, + Class configClass) { + this.file = file; + boolean nonZeroFile = file.isFile() && file.length() > 0; + this.isNar = nonZeroFile ? ZipUtil.containsAnyEntry(file, + new String[] {"META-INF/services/pulsar-io.yaml", "META-INF/bundled-dependencies"}) : false; + this.narExtractionDirectory = narExtractionDirectory; + this.enableClassloading = enableClassloading; + if (isNar) { + List classpathFromArchive = null; + try { + classpathFromArchive = NarClassLoader.getClasspathFromArchive(file, narExtractionDirectory); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + List classFileLocators = new ArrayList<>(); + classFileLocators.add(ClassFileLocator.ForClassLoader.ofSystemLoader()); + for (File classpath : classpathFromArchive) { + if (classpath.exists()) { + try { + ClassFileLocator locator; + if (classpath.isDirectory()) { + locator = new ClassFileLocator.ForFolder(classpath); + } else { + locator = ClassFileLocator.ForJarFile.of(classpath); + } + classFileLocators.add(locator); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + this.classFileLocator = new ClassFileLocator.Compound(classFileLocators); + this.typePool = TypePool.Default.of(classFileLocator); + try { + this.configMetadata = FunctionUtils.getPulsarIOServiceConfig(file, configClass); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + try { + this.classFileLocator = nonZeroFile + ? 
new ClassFileLocator.Compound(ClassFileLocator.ForClassLoader.ofSystemLoader(), + ClassFileLocator.ForJarFile.of(file)) : + new ClassFileLocator.Compound(ClassFileLocator.ForClassLoader.ofSystemLoader()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + this.typePool = + TypePool.Default.of(classFileLocator); + this.configMetadata = null; + } + } + + public TypeDescription resolveType(String className) { + return typePool.describe(className).resolve(); + } + + public boolean isNar() { + return isNar; + } + + public File getFile() { + return file; + } + + public TypePool getTypePool() { + return typePool; + } + + @Override + public T getFunctionMetaData(Class clazz) { + return configMetadata != null ? clazz.cast(configMetadata) : null; + } + + @Override + public synchronized void close() throws IOException { + classFileLocator.close(); + if (classLoader instanceof Closeable) { + ((Closeable) classLoader).close(); + } + } + + public boolean isEnableClassloading() { + return enableClassloading; + } + + public synchronized ClassLoader getClassLoader() { + if (classLoader == null) { + classLoader = createClassLoader(); + } + return classLoader; + } + + private ClassLoader createClassLoader() { + if (enableClassloading) { + if (isNar) { + try { + return NarClassLoaderBuilder.builder() + .narFile(file) + .extractionDirectory(narExtractionDirectory) + .build(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + try { + return new URLClassLoader(new java.net.URL[] {file.toURI().toURL()}, + NarClassLoader.class.getClassLoader()); + } catch (MalformedURLException e) { + throw new UncheckedIOException(e); + } + } + } else { + throw new IllegalStateException("Classloading is not enabled"); + } + } + + @Override + public String toString() { + return "FunctionFilePackage{" + + "file=" + file + + ", isNar=" + isNar + + '}'; + } +} diff --git 
a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionRuntimeCommon.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionRuntimeCommon.java new file mode 100644 index 0000000000000..ed17478dd00ed --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionRuntimeCommon.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.utils; + +import static org.apache.commons.lang3.StringUtils.isEmpty; +import java.io.File; +import java.io.IOException; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.nar.NarClassLoaderBuilder; +import org.apache.pulsar.common.util.ClassLoaderUtils; +import org.apache.pulsar.functions.proto.Function; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.apache.pulsar.functions.utils.io.ConnectorUtils; + +public class FunctionRuntimeCommon { + public static NarClassLoader extractNarClassLoader(File packageFile, + String narExtractionDirectory) { + if (packageFile != null) { + try { + return NarClassLoaderBuilder.builder() + .narFile(packageFile) + .extractionDirectory(narExtractionDirectory) + .build(); + } catch (IOException e) { + throw new IllegalArgumentException(e.getMessage()); + } + } + return null; + } + + public static ClassLoader getClassLoaderFromPackage( + Function.FunctionDetails.ComponentType componentType, + String className, + File packageFile, + String narExtractionDirectory) { + String connectorClassName = className; + ClassLoader jarClassLoader = null; + boolean keepJarClassLoader = false; + NarClassLoader narClassLoader = null; + boolean keepNarClassLoader = false; + + Exception jarClassLoaderException = null; + Exception narClassLoaderException = null; + + try { + try { + jarClassLoader = ClassLoaderUtils.extractClassLoader(packageFile); + } catch (Exception e) { + jarClassLoaderException = e; + } + try { + narClassLoader = extractNarClassLoader(packageFile, narExtractionDirectory); + } catch (Exception e) { + narClassLoaderException = e; + } + + // if connector class name is not provided, we can only try to load archive as a NAR + if (isEmpty(connectorClassName)) { + if (narClassLoader == null) { + throw new IllegalArgumentException(String.format("%s package does not have the correct format. 
" + + "Pulsar cannot determine if the package is a NAR package or JAR package. " + + "%s classname is not provided and attempts to load it as a NAR package produced " + + "the following error.", + FunctionCommon.capFirstLetter(componentType), FunctionCommon.capFirstLetter(componentType)), + narClassLoaderException); + } + try { + if (componentType == Function.FunctionDetails.ComponentType.FUNCTION) { + connectorClassName = FunctionUtils.getFunctionClass(narClassLoader); + } else if (componentType == Function.FunctionDetails.ComponentType.SOURCE) { + connectorClassName = ConnectorUtils.getIOSourceClass(narClassLoader); + } else { + connectorClassName = ConnectorUtils.getIOSinkClass(narClassLoader); + } + } catch (IOException e) { + throw new IllegalArgumentException(String.format("Failed to extract %s class from archive", + componentType.toString().toLowerCase()), e); + } + + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e) { + throw new IllegalArgumentException(String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e); + } + + } else { + // if connector class name is provided, we need to try to load it as a JAR and as a NAR. 
+ if (jarClassLoader != null) { + try { + jarClassLoader.loadClass(connectorClassName); + keepJarClassLoader = true; + return jarClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e) { + // class not found in JAR try loading as a NAR and searching for the class + if (narClassLoader != null) { + + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e1) { + throw new IllegalArgumentException( + String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e1); + } + } else { + throw new IllegalArgumentException(String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e); + } + } + } else if (narClassLoader != null) { + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e1) { + throw new IllegalArgumentException( + String.format("%s class %s must be in class path", + FunctionCommon.capFirstLetter(componentType), connectorClassName), e1); + } + } else { + StringBuilder errorMsg = new StringBuilder(FunctionCommon.capFirstLetter(componentType) + + " package does not have the correct format." 
+ + " Pulsar cannot determine if the package is a NAR package or JAR package."); + + if (jarClassLoaderException != null) { + errorMsg.append( + " Attempts to load it as a JAR package produced error: " + jarClassLoaderException + .getMessage()); + } + + if (narClassLoaderException != null) { + errorMsg.append( + " Attempts to load it as a NAR package produced error: " + narClassLoaderException + .getMessage()); + } + + throw new IllegalArgumentException(errorMsg.toString()); + } + } + } finally { + if (!keepJarClassLoader) { + ClassLoaderUtils.closeClassLoader(jarClassLoader); + } + if (!keepNarClassLoader) { + ClassLoaderUtils.closeClassLoader(narClassLoader); + } + } + } + +} diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/LoadedFunctionPackage.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/LoadedFunctionPackage.java new file mode 100644 index 0000000000000..e27ed0eca1973 --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/LoadedFunctionPackage.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.utils; + +import java.io.IOException; +import java.io.UncheckedIOException; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.dynamic.ClassFileLocator; +import net.bytebuddy.pool.TypePool; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; + +/** + * LoadedFunctionPackage is a class that represents a function package and + * implements the ValidatableFunctionPackage interface which decouples the + * function package from classloading. This implementation is backed by + * a ClassLoader, and it is used when the function package is already loaded + * by a ClassLoader. This is the case in the LocalRunner and in some of + * the unit tests. + */ +public class LoadedFunctionPackage implements ValidatableFunctionPackage { + private final ClassLoader classLoader; + private final Object configMetadata; + private final TypePool typePool; + + public LoadedFunctionPackage(ClassLoader classLoader, Class configMetadataClass, T configMetadata) { + this.classLoader = classLoader; + this.configMetadata = configMetadata; + typePool = TypePool.Default.of( + ClassFileLocator.ForClassLoader.of(classLoader)); + } + + public LoadedFunctionPackage(ClassLoader classLoader, Class configMetadataClass) { + this.classLoader = classLoader; + if (classLoader instanceof NarClassLoader) { + try { + configMetadata = FunctionUtils.getPulsarIOServiceConfig((NarClassLoader) classLoader, + configMetadataClass); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + configMetadata = null; + } + typePool = TypePool.Default.of( + ClassFileLocator.ForClassLoader.of(classLoader)); + } + + @Override + public TypeDescription resolveType(String className) { + return typePool.describe(className).resolve(); + } + + @Override + public TypePool getTypePool() { + return typePool; + } + + @Override + public T getFunctionMetaData(Class clazz) { + return 
configMetadata != null ? clazz.cast(configMetadata) : null; + } + + @Override + public boolean isEnableClassloading() { + return true; + } + + @Override + public ClassLoader getClassLoader() { + return classLoader; + } +} diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java index c26df3b3f1416..d93676a106d9a 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SinkConfigUtils.java @@ -41,23 +41,23 @@ import lombok.Setter; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang.StringUtils; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.functions.Resources; import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.nar.NarClassLoader; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.config.validation.ConfigValidation; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; @Slf4j public class SinkConfigUtils { @@ -210,6 +210,13 @@ public static FunctionDetails convert(SinkConfig sinkConfig, 
ExtractedSinkDetail functionDetailsBuilder.setSource(sourceSpecBuilder); + if (sinkConfig.getRetainKeyOrdering() != null) { + functionDetailsBuilder.setRetainKeyOrdering(sinkConfig.getRetainKeyOrdering()); + } + if (sinkConfig.getRetainOrdering() != null) { + functionDetailsBuilder.setRetainOrdering(sinkConfig.getRetainOrdering()); + } + if (sinkConfig.getMaxMessageRetries() != null && sinkConfig.getMaxMessageRetries() > 0) { Function.RetryDetails.Builder retryDetails = Function.RetryDetails.newBuilder(); retryDetails.setMaxMessageRetries(sinkConfig.getMaxMessageRetries()); @@ -395,8 +402,8 @@ public static SinkConfig convertFromDetails(FunctionDetails functionDetails) { } public static ExtractedSinkDetails validateAndExtractDetails(SinkConfig sinkConfig, - ClassLoader sinkClassLoader, - ClassLoader functionClassLoader, + ValidatableFunctionPackage sinkFunction, + ValidatableFunctionPackage transformFunction, boolean validateConnectorConfig) { if (isEmpty(sinkConfig.getTenant())) { throw new IllegalArgumentException("Sink tenant cannot be null"); @@ -436,63 +443,72 @@ public static ExtractedSinkDetails validateAndExtractDetails(SinkConfig sinkConf // if class name in sink config is not set, this should be a built-in sink // thus we should try to find it class name in the NAR service definition if (sinkClassName == null) { - try { - sinkClassName = ConnectorUtils.getIOSinkClass((NarClassLoader) sinkClassLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract sink class from archive", e); + ConnectorDefinition connectorDefinition = sinkFunction.getFunctionMetaData(ConnectorDefinition.class); + if (connectorDefinition == null) { + throw new IllegalArgumentException( + "Sink package doesn't contain the META-INF/services/pulsar-io.yaml file."); + } + sinkClassName = connectorDefinition.getSinkClass(); + if (sinkClassName == null) { + throw new IllegalArgumentException("Failed to extract sink class from archive"); } } // check if sink 
implements the correct interfaces - Class sinkClass; + TypeDefinition sinkClass; try { - sinkClass = sinkClassLoader.loadClass(sinkClassName); - } catch (ClassNotFoundException e) { + sinkClass = sinkFunction.resolveType(sinkClassName); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("Sink class %s not found in class loader", sinkClassName), e); + String.format("Sink class %s not found", sinkClassName), e); } String functionClassName = sinkConfig.getTransformFunctionClassName(); - Class typeArg; - ClassLoader inputClassLoader; - if (functionClassLoader != null) { + TypeDefinition typeArg; + ValidatableFunctionPackage inputFunction; + if (transformFunction != null) { // if function class name in sink config is not set, this should be a built-in function // thus we should try to find it class name in the NAR service definition if (functionClassName == null) { - try { - functionClassName = FunctionUtils.getFunctionClass(functionClassLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract function class from archive", e); + FunctionDefinition functionDefinition = + transformFunction.getFunctionMetaData(FunctionDefinition.class); + if (functionDefinition == null) { + throw new IllegalArgumentException( + "Function package doesn't contain the META-INF/services/pulsar-io.yaml file."); + } + functionClassName = functionDefinition.getFunctionClass(); + if (functionClassName == null) { + throw new IllegalArgumentException("Transform function class name must be set"); } } - Class functionClass; + TypeDefinition functionClass; try { - functionClass = functionClassLoader.loadClass(functionClassName); - } catch (ClassNotFoundException e) { + functionClass = transformFunction.resolveType(functionClassName); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("Function class %s not found in class loader", functionClassName), e); + 
String.format("Function class %s not found", functionClassName), e); } // extract type from transform function class - if (!getRawFunctionTypes(functionClass, false)[1].equals(Record.class)) { + if (!getRawFunctionTypes(functionClass, false)[1].asErasure().isAssignableTo(Record.class)) { throw new IllegalArgumentException("Sink transform function output must be of type Record"); } typeArg = getFunctionTypes(functionClass, false)[0]; - inputClassLoader = functionClassLoader; + inputFunction = transformFunction; } else { // extract type from sink class typeArg = getSinkType(sinkClass); - inputClassLoader = sinkClassLoader; + inputFunction = sinkFunction; } if (sinkConfig.getTopicToSerdeClassName() != null) { for (String serdeClassName : sinkConfig.getTopicToSerdeClassName().values()) { - ValidatorUtils.validateSerde(serdeClassName, typeArg, inputClassLoader, true); + ValidatorUtils.validateSerde(serdeClassName, typeArg, inputFunction.getTypePool(), true); } } if (sinkConfig.getTopicToSchemaType() != null) { for (String schemaType : sinkConfig.getTopicToSchemaType().values()) { - ValidatorUtils.validateSchema(schemaType, typeArg, inputClassLoader, true); + ValidatorUtils.validateSchema(schemaType, typeArg, inputFunction.getTypePool(), true); } } @@ -505,23 +521,43 @@ public static ExtractedSinkDetails validateAndExtractDetails(SinkConfig sinkConf throw new IllegalArgumentException("Only one of serdeClassName or schemaType should be set"); } if (!isEmpty(consumerSpec.getSerdeClassName())) { - ValidatorUtils.validateSerde(consumerSpec.getSerdeClassName(), typeArg, inputClassLoader, true); + ValidatorUtils.validateSerde(consumerSpec.getSerdeClassName(), typeArg, + inputFunction.getTypePool(), true); } if (!isEmpty(consumerSpec.getSchemaType())) { - ValidatorUtils.validateSchema(consumerSpec.getSchemaType(), typeArg, inputClassLoader, true); + ValidatorUtils.validateSchema(consumerSpec.getSchemaType(), typeArg, + inputFunction.getTypePool(), true); } if 
(consumerSpec.getCryptoConfig() != null) { - ValidatorUtils.validateCryptoKeyReader(consumerSpec.getCryptoConfig(), inputClassLoader, false); + ValidatorUtils.validateCryptoKeyReader(consumerSpec.getCryptoConfig(), + inputFunction.getTypePool(), false); } } } - // validate user defined config if enabled and sink is loaded from NAR - if (validateConnectorConfig && sinkClassLoader instanceof NarClassLoader) { - validateSinkConfig(sinkConfig, (NarClassLoader) sinkClassLoader); + if (sinkConfig.getRetainKeyOrdering() != null + && sinkConfig.getRetainKeyOrdering() + && sinkConfig.getProcessingGuarantees() != null + && sinkConfig.getProcessingGuarantees() == FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE) { + throw new IllegalArgumentException( + "When effectively once processing guarantee is specified, retain Key ordering cannot be set"); + } + + if (sinkConfig.getRetainKeyOrdering() != null && sinkConfig.getRetainKeyOrdering() + && sinkConfig.getRetainOrdering() != null && sinkConfig.getRetainOrdering()) { + throw new IllegalArgumentException("Only one of retain ordering or retain key ordering can be set"); + } + + // validate user defined config if enabled and classloading is enabled + if (validateConnectorConfig) { + if (sinkFunction.isEnableClassloading()) { + validateSinkConfig(sinkConfig, sinkFunction); + } else { + log.warn("Skipping annotation based validation of sink config as classloading is disabled"); + } } - return new ExtractedSinkDetails(sinkClassName, typeArg.getName(), functionClassName); + return new ExtractedSinkDetails(sinkClassName, typeArg.asErasure().getTypeName(), functionClassName); } public static Collection collectAllInputTopics(SinkConfig sinkConfig) { @@ -677,29 +713,13 @@ public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig ne return mergedConfig; } - public static void validateSinkConfig(SinkConfig sinkConfig, NarClassLoader narClassLoader) { - - if (sinkConfig.getRetainKeyOrdering() != null - && 
sinkConfig.getRetainKeyOrdering() - && sinkConfig.getProcessingGuarantees() != null - && sinkConfig.getProcessingGuarantees() == FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE) { - throw new IllegalArgumentException( - "When effectively once processing guarantee is specified, retain Key ordering cannot be set"); - } - - if (sinkConfig.getRetainKeyOrdering() != null && sinkConfig.getRetainKeyOrdering() - && sinkConfig.getRetainOrdering() != null && sinkConfig.getRetainOrdering()) { - throw new IllegalArgumentException("Only one of retain ordering or retain key ordering can be set"); - } - + public static void validateSinkConfig(SinkConfig sinkConfig, ValidatableFunctionPackage sinkFunction) { try { - ConnectorDefinition defn = ConnectorUtils.getConnectorDefinition(narClassLoader); - if (defn.getSinkConfigClass() != null) { - Class configClass = Class.forName(defn.getSinkConfigClass(), true, narClassLoader); + ConnectorDefinition defn = sinkFunction.getFunctionMetaData(ConnectorDefinition.class); + if (defn != null && defn.getSinkConfigClass() != null) { + Class configClass = Class.forName(defn.getSinkConfigClass(), true, sinkFunction.getClassLoader()); validateSinkConfig(sinkConfig, configClass); } - } catch (IOException e) { - throw new IllegalArgumentException("Error validating sink config", e); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Could not find sink config class", e); } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java index ec0c5c444ccba..a6430bbea4585 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/SourceConfigUtils.java @@ -35,7 +35,9 @@ import lombok.Setter; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; -import 
net.jodah.typetools.TypeResolver; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.pool.TypePool; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.functions.ProducerConfig; import org.apache.pulsar.common.functions.Resources; @@ -44,13 +46,11 @@ import org.apache.pulsar.common.io.SourceConfig; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.nar.NarClassLoader; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.config.validation.ConfigValidation; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; import org.apache.pulsar.io.core.BatchSource; import org.apache.pulsar.io.core.Source; @@ -294,7 +294,7 @@ public static SourceConfig convertFromDetails(FunctionDetails functionDetails) { } public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sourceConfig, - ClassLoader sourceClassLoader, + ValidatableFunctionPackage sourceFunction, boolean validateConnectorConfig) { if (isEmpty(sourceConfig.getTenant())) { throw new IllegalArgumentException("Source tenant cannot be null"); @@ -319,29 +319,34 @@ public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sour // if class name in source config is not set, this should be a built-in source // thus we should try to find it class name in the NAR service definition if (sourceClassName == null) { - try { - sourceClassName = ConnectorUtils.getIOSourceClass((NarClassLoader) sourceClassLoader); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to extract source class from archive", e); + ConnectorDefinition connectorDefinition = 
sourceFunction.getFunctionMetaData(ConnectorDefinition.class); + if (connectorDefinition == null) { + throw new IllegalArgumentException( + "Source package doesn't contain the META-INF/services/pulsar-io.yaml file."); + } + sourceClassName = connectorDefinition.getSourceClass(); + if (sourceClassName == null) { + throw new IllegalArgumentException("Failed to extract source class from archive"); } } // check if source implements the correct interfaces - Class sourceClass; + TypeDescription sourceClass; try { - sourceClass = sourceClassLoader.loadClass(sourceClassName); - } catch (ClassNotFoundException e) { + sourceClass = sourceFunction.resolveType(sourceClassName); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( String.format("Source class %s not found in class loader", sourceClassName), e); } - if (!Source.class.isAssignableFrom(sourceClass) && !BatchSource.class.isAssignableFrom(sourceClass)) { + if (!(sourceClass.asErasure().isAssignableTo(Source.class) || sourceClass.asErasure() + .isAssignableTo(BatchSource.class))) { throw new IllegalArgumentException( - String.format("Source class %s does not implement the correct interface", - sourceClass.getName())); + String.format("Source class %s does not implement the correct interface", + sourceClass.getName())); } - if (BatchSource.class.isAssignableFrom(sourceClass)) { + if (sourceClass.asErasure().isAssignableTo(BatchSource.class)) { if (sourceConfig.getBatchSourceConfig() != null) { validateBatchSourceConfig(sourceConfig.getBatchSourceConfig()); } else { @@ -352,7 +357,14 @@ public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sour } // extract type from source class - Class typeArg = getSourceType(sourceClass); + TypeDefinition typeArg; + + try { + typeArg = getSourceType(sourceClass); + } catch (Exception e) { + throw new IllegalArgumentException( + String.format("Failed to resolve type for Source class %s", sourceClassName), e); + } // Only one 
of serdeClassName or schemaType should be set if (!StringUtils.isEmpty(sourceConfig.getSerdeClassName()) && !StringUtils @@ -361,29 +373,30 @@ public static ExtractedSourceDetails validateAndExtractDetails(SourceConfig sour } if (!StringUtils.isEmpty(sourceConfig.getSerdeClassName())) { - ValidatorUtils.validateSerde(sourceConfig.getSerdeClassName(), typeArg, sourceClassLoader, false); + ValidatorUtils.validateSerde(sourceConfig.getSerdeClassName(), typeArg, sourceFunction.getTypePool(), + false); } if (!StringUtils.isEmpty(sourceConfig.getSchemaType())) { - ValidatorUtils.validateSchema(sourceConfig.getSchemaType(), typeArg, sourceClassLoader, false); + ValidatorUtils.validateSchema(sourceConfig.getSchemaType(), typeArg, sourceFunction.getTypePool(), + false); } if (sourceConfig.getProducerConfig() != null && sourceConfig.getProducerConfig().getCryptoConfig() != null) { ValidatorUtils - .validateCryptoKeyReader(sourceConfig.getProducerConfig().getCryptoConfig(), sourceClassLoader, - true); - } - - if (typeArg.equals(TypeResolver.Unknown.class)) { - throw new IllegalArgumentException( - String.format("Failed to resolve type for Source class %s", sourceClassName)); + .validateCryptoKeyReader(sourceConfig.getProducerConfig().getCryptoConfig(), + sourceFunction.getTypePool(), true); } - // validate user defined config if enabled and source is loaded from NAR - if (validateConnectorConfig && sourceClassLoader instanceof NarClassLoader) { - validateSourceConfig(sourceConfig, (NarClassLoader) sourceClassLoader); + // validate user defined config if enabled and classloading is enabled + if (validateConnectorConfig) { + if (sourceFunction.isEnableClassloading()) { + validateSourceConfig(sourceConfig, sourceFunction); + } else { + log.warn("Skipping annotation based validation of sink config as classloading is disabled"); + } } - return new ExtractedSourceDetails(sourceClassName, typeArg.getName()); + return new ExtractedSourceDetails(sourceClassName, 
typeArg.asErasure().getTypeName()); } @SneakyThrows @@ -448,6 +461,9 @@ public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceCon validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig()); mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig()); } + if (newConfig.getProducerConfig() != null) { + mergedConfig.setProducerConfig(newConfig.getProducerConfig()); + } return mergedConfig; } @@ -521,15 +537,14 @@ public static void validateBatchSourceConfigUpdate(BatchSourceConfig existingCon } } - public static void validateSourceConfig(SourceConfig sourceConfig, NarClassLoader narClassLoader) { + public static void validateSourceConfig(SourceConfig sourceConfig, ValidatableFunctionPackage sourceFunction) { try { - ConnectorDefinition defn = ConnectorUtils.getConnectorDefinition(narClassLoader); - if (defn.getSourceConfigClass() != null) { - Class configClass = Class.forName(defn.getSourceConfigClass(), true, narClassLoader); + ConnectorDefinition defn = sourceFunction.getFunctionMetaData(ConnectorDefinition.class); + if (defn != null && defn.getSourceConfigClass() != null) { + Class configClass = + Class.forName(defn.getSourceConfigClass(), true, sourceFunction.getClassLoader()); validateSourceConfig(sourceConfig, configClass); } - } catch (IOException e) { - throw new IllegalArgumentException("Error validating source config", e); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Could not find source config class"); } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatableFunctionPackage.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatableFunctionPackage.java new file mode 100644 index 0000000000000..8d5aefb6f6785 --- /dev/null +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatableFunctionPackage.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.utils; + +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.pool.TypePool; + +/** + * This abstraction separates the function and connector definition from classloading, + * enabling validation without the need for classloading. It utilizes Byte Buddy for + * type and annotation resolution. + * + * The function or connector definition is directly extracted from the archive file, + * eliminating the need for classloader initialization. + * + * The getClassLoader method should only be invoked when classloading is enabled. + * Classloading is required in the LocalRunner and in the Functions worker when the + * worker is configured with the 'validateConnectorConfig' set to true. + */ +public interface ValidatableFunctionPackage { + /** + * Resolves the type description for the given class name within the function package. + */ + TypeDescription resolveType(String className); + /** + * Returns the Byte Buddy TypePool instance for the function package. + */ + TypePool getTypePool(); + /** + * Returns the function or connector definition metadata. + * Supports FunctionDefinition and ConnectorDefinition as the metadata type. 
+ */ + T getFunctionMetaData(Class clazz); + /** + * Returns if classloading is enabled for the function package. + */ + boolean isEnableClassloading(); + /** + * Returns the classloader for the function package. The classloader is + * lazily initialized when classloading is enabled. + */ + ClassLoader getClassLoader(); +} diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java index 390671c5606af..8df6a3f261a6e 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/ValidatorUtils.java @@ -18,35 +18,40 @@ */ package org.apache.pulsar.functions.utils; -import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.isEmpty; -import static org.apache.commons.lang3.StringUtils.isNotBlank; import java.util.Map; import lombok.extern.slf4j.Slf4j; -import net.jodah.typetools.TypeResolver; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.description.type.TypeDescription; +import net.bytebuddy.pool.TypePool; import org.apache.pulsar.client.api.CryptoKeyReader; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.functions.CryptoConfig; import org.apache.pulsar.common.schema.SchemaType; -import org.apache.pulsar.common.util.ClassLoaderUtils; -import org.apache.pulsar.common.util.Reflections; import org.apache.pulsar.functions.api.SerDe; -import org.apache.pulsar.functions.proto.Function; -import org.apache.pulsar.io.core.Sink; -import org.apache.pulsar.io.core.Source; @Slf4j public class ValidatorUtils { private static final String DEFAULT_SERDE = "org.apache.pulsar.functions.api.utils.DefaultSerDe"; - public static void validateSchema(String schemaType, Class typeArg, ClassLoader clsLoader, + public static void 
validateSchema(String schemaType, TypeDefinition typeArg, TypePool typePool, boolean input) { if (isEmpty(schemaType) || getBuiltinSchemaType(schemaType) != null) { // If it's empty, we use the default schema and no need to validate // If it's built-in, no need to validate } else { - ClassLoaderUtils.implementsClass(schemaType, Schema.class, clsLoader); - validateSchemaType(schemaType, typeArg, clsLoader, input); + TypeDescription schemaClass = null; + try { + schemaClass = typePool.describe(schemaType).resolve(); + } catch (TypePool.Resolution.NoSuchTypeException e) { + throw new IllegalArgumentException( + String.format("The schema class %s does not exist", schemaType)); + } + if (!schemaClass.asErasure().isAssignableTo(Schema.class)) { + throw new IllegalArgumentException( + String.format("%s does not implement %s", schemaType, Schema.class.getName())); + } + validateSchemaType(schemaClass, typeArg, typePool, input); } } @@ -60,29 +65,32 @@ private static SchemaType getBuiltinSchemaType(String schemaTypeOrClassName) { } - public static void validateCryptoKeyReader(CryptoConfig conf, ClassLoader classLoader, boolean isProducer) { + public static void validateCryptoKeyReader(CryptoConfig conf, TypePool typePool, boolean isProducer) { if (isEmpty(conf.getCryptoKeyReaderClassName())) { return; } - Class cryptoClass; + String cryptoClassName = conf.getCryptoKeyReaderClassName(); + TypeDescription cryptoClass = null; try { - cryptoClass = ClassLoaderUtils.loadClass(conf.getCryptoKeyReaderClassName(), classLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { + cryptoClass = typePool.describe(cryptoClassName).resolve(); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( - String.format("The crypto key reader class %s does not exist", conf.getCryptoKeyReaderClassName())); + String.format("The crypto key reader class %s does not exist", cryptoClassName)); + } + if 
(!cryptoClass.asErasure().isAssignableTo(CryptoKeyReader.class)) { + throw new IllegalArgumentException( + String.format("%s does not implement %s", cryptoClassName, CryptoKeyReader.class.getName())); } - ClassLoaderUtils.implementsClass(conf.getCryptoKeyReaderClassName(), CryptoKeyReader.class, classLoader); - try { - cryptoClass.getConstructor(Map.class); - } catch (NoSuchMethodException ex) { + boolean hasConstructor = cryptoClass.getDeclaredMethods().stream() + .anyMatch(method -> method.isConstructor() && method.getParameters().size() == 1 + && method.getParameters().get(0).getType().asErasure().represents(Map.class)); + + if (!hasConstructor) { throw new IllegalArgumentException( String.format("The crypto key reader class %s does not implement the desired constructor.", conf.getCryptoKeyReaderClassName())); - - } catch (SecurityException e) { - throw new IllegalArgumentException("Failed to access crypto key reader class", e); } if (isProducer && (conf.getEncryptionKeys() == null || conf.getEncryptionKeys().length == 0)) { @@ -90,7 +98,7 @@ public static void validateCryptoKeyReader(CryptoConfig conf, ClassLoader classL } } - public static void validateSerde(String inputSerializer, Class typeArg, ClassLoader clsLoader, + public static void validateSerde(String inputSerializer, TypeDefinition typeArg, TypePool typePool, boolean deser) { if (isEmpty(inputSerializer)) { return; @@ -98,154 +106,53 @@ public static void validateSerde(String inputSerializer, Class typeArg, Class if (inputSerializer.equals(DEFAULT_SERDE)) { return; } + TypeDescription serdeClass; try { - Class serdeClass = ClassLoaderUtils.loadClass(inputSerializer, clsLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { + serdeClass = typePool.describe(inputSerializer).resolve(); + } catch (TypePool.Resolution.NoSuchTypeException e) { throw new IllegalArgumentException( String.format("The input serialization/deserialization class %s does not exist", inputSerializer)); } - 
ClassLoaderUtils.implementsClass(inputSerializer, SerDe.class, clsLoader); - - SerDe serDe = (SerDe) Reflections.createInstance(inputSerializer, clsLoader); - if (serDe == null) { - throw new IllegalArgumentException(String.format("The SerDe class %s does not exist", - inputSerializer)); - } - Class[] serDeTypes = TypeResolver.resolveRawArguments(SerDe.class, serDe.getClass()); - - // type inheritance information seems to be lost in generic type - // load the actual type class for verification - Class fnInputClass; - Class serdeInputClass; - try { - fnInputClass = Class.forName(typeArg.getName(), true, clsLoader); - serdeInputClass = Class.forName(serDeTypes[0].getName(), true, clsLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException("Failed to load type class", e); - } + TypeDescription.Generic serDeTypeArg = serdeClass.getInterfaces().stream() + .filter(i -> i.asErasure().isAssignableTo(SerDe.class)) + .findFirst() + .map(i -> i.getTypeArguments().get(0)) + .orElseThrow(() -> new IllegalArgumentException( + String.format("%s does not implement %s", inputSerializer, SerDe.class.getName()))); if (deser) { - if (!fnInputClass.isAssignableFrom(serdeInputClass)) { - throw new IllegalArgumentException("Serializer type mismatch " + typeArg + " vs " + serDeTypes[0]); + if (!serDeTypeArg.asErasure().isAssignableTo(typeArg.asErasure())) { + throw new IllegalArgumentException("Serializer type mismatch " + typeArg.getActualName() + " vs " + + serDeTypeArg.getActualName()); } } else { - if (!serdeInputClass.isAssignableFrom(fnInputClass)) { - throw new IllegalArgumentException("Serializer type mismatch " + typeArg + " vs " + serDeTypes[0]); + if (!serDeTypeArg.asErasure().isAssignableFrom(typeArg.asErasure())) { + throw new IllegalArgumentException("Serializer type mismatch " + typeArg.getActualName() + " vs " + + serDeTypeArg.getActualName()); } } } - private static void validateSchemaType(String schemaClassName, Class 
typeArg, ClassLoader clsLoader, + private static void validateSchemaType(TypeDefinition schema, TypeDefinition typeArg, TypePool typePool, boolean input) { - Schema schema = (Schema) Reflections.createInstance(schemaClassName, clsLoader); - if (schema == null) { - throw new IllegalArgumentException(String.format("The Schema class %s does not exist", - schemaClassName)); - } - Class[] schemaTypes = TypeResolver.resolveRawArguments(Schema.class, schema.getClass()); - // type inheritance information seems to be lost in generic type - // load the actual type class for verification - Class fnInputClass; - Class schemaInputClass; - try { - fnInputClass = Class.forName(typeArg.getName(), true, clsLoader); - schemaInputClass = Class.forName(schemaTypes[0].getName(), true, clsLoader); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException("Failed to load type class", e); - } + TypeDescription.Generic schemaTypeArg = schema.getInterfaces().stream() + .filter(i -> i.asErasure().isAssignableTo(Schema.class)) + .findFirst() + .map(i -> i.getTypeArguments().get(0)) + .orElse(null); if (input) { - if (!fnInputClass.isAssignableFrom(schemaInputClass)) { + if (!schemaTypeArg.asErasure().isAssignableTo(typeArg.asErasure())) { throw new IllegalArgumentException( - "Schema type mismatch " + typeArg + " vs " + schemaTypes[0]); + "Schema type mismatch " + typeArg.getActualName() + " vs " + schemaTypeArg.getActualName()); } } else { - if (!schemaInputClass.isAssignableFrom(fnInputClass)) { + if (!schemaTypeArg.asErasure().isAssignableFrom(typeArg.asErasure())) { throw new IllegalArgumentException( - "Schema type mismatch " + typeArg + " vs " + schemaTypes[0]); - } - } - } - - - public static void validateFunctionClassTypes(ClassLoader classLoader, - Function.FunctionDetails.Builder functionDetailsBuilder) { - - // validate only if classLoader is provided - if (classLoader == null) { - return; - } - - if 
(isBlank(functionDetailsBuilder.getClassName())) { - throw new IllegalArgumentException("Function class-name can't be empty"); - } - - // validate function class-type - Class functionClass; - try { - functionClass = classLoader.loadClass(functionDetailsBuilder.getClassName()); - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("Function class %s must be in class path", functionDetailsBuilder.getClassName()), e); - } - Class[] typeArgs = FunctionCommon.getFunctionTypes(functionClass, false); - - if (!(org.apache.pulsar.functions.api.Function.class.isAssignableFrom(functionClass)) - && !(java.util.function.Function.class.isAssignableFrom(functionClass))) { - throw new RuntimeException("User class must either be Function or java.util.Function"); - } - - if (functionDetailsBuilder.hasSource() && functionDetailsBuilder.getSource() != null - && isNotBlank(functionDetailsBuilder.getSource().getClassName())) { - try { - String sourceClassName = functionDetailsBuilder.getSource().getClassName(); - String argClassName = FunctionCommon.getTypeArg(sourceClassName, Source.class, classLoader).getName(); - functionDetailsBuilder - .setSource(functionDetailsBuilder.getSourceBuilder().setTypeClassName(argClassName)); - - // if sink-class not present then set same arg as source - if (!functionDetailsBuilder.hasSink() || isBlank(functionDetailsBuilder.getSink().getClassName())) { - functionDetailsBuilder - .setSink(functionDetailsBuilder.getSinkBuilder().setTypeClassName(argClassName)); - } - - } catch (IllegalArgumentException ie) { - throw ie; - } catch (Exception e) { - log.error("Failed to validate source class", e); - throw new IllegalArgumentException("Failed to validate source class-name", e); - } - } else if (isBlank(functionDetailsBuilder.getSourceBuilder().getTypeClassName())) { - // if function-src-class is not present then set function-src type-class according to function class - functionDetailsBuilder - 
.setSource(functionDetailsBuilder.getSourceBuilder().setTypeClassName(typeArgs[0].getName())); - } - - if (functionDetailsBuilder.hasSink() && functionDetailsBuilder.getSink() != null - && isNotBlank(functionDetailsBuilder.getSink().getClassName())) { - try { - String sinkClassName = functionDetailsBuilder.getSink().getClassName(); - String argClassName = FunctionCommon.getTypeArg(sinkClassName, Sink.class, classLoader).getName(); - functionDetailsBuilder.setSink(functionDetailsBuilder.getSinkBuilder().setTypeClassName(argClassName)); - - // if source-class not present then set same arg as sink - if (!functionDetailsBuilder.hasSource() || isBlank(functionDetailsBuilder.getSource().getClassName())) { - functionDetailsBuilder - .setSource(functionDetailsBuilder.getSourceBuilder().setTypeClassName(argClassName)); - } - - } catch (IllegalArgumentException ie) { - throw ie; - } catch (Exception e) { - log.error("Failed to validate sink class", e); - throw new IllegalArgumentException("Failed to validate sink class-name", e); + "Schema type mismatch " + typeArg.getActualName() + " vs " + schemaTypeArg.getActualName()); } - } else if (isBlank(functionDetailsBuilder.getSinkBuilder().getTypeClassName())) { - // if function-sink-class is not present then set function-sink type-class according to function class - functionDetailsBuilder - .setSink(functionDetailsBuilder.getSinkBuilder().setTypeClassName(typeArgs[1].getName())); } } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java index 028b57d69c86b..cfb213f34ed72 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionArchive.java @@ -19,14 +19,50 @@ package org.apache.pulsar.functions.utils.functions; 
import java.nio.file.Path; -import lombok.Builder; -import lombok.Data; import org.apache.pulsar.common.functions.FunctionDefinition; +import org.apache.pulsar.functions.utils.FunctionFilePackage; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; -@Builder -@Data -public class FunctionArchive { - private Path archivePath; - private ClassLoader classLoader; - private FunctionDefinition functionDefinition; +public class FunctionArchive implements AutoCloseable { + private final Path archivePath; + private final FunctionDefinition functionDefinition; + private final String narExtractionDirectory; + private final boolean enableClassloading; + private ValidatableFunctionPackage functionPackage; + private boolean closed; + + public FunctionArchive(Path archivePath, FunctionDefinition functionDefinition, String narExtractionDirectory, + boolean enableClassloading) { + this.archivePath = archivePath; + this.functionDefinition = functionDefinition; + this.narExtractionDirectory = narExtractionDirectory; + this.enableClassloading = enableClassloading; + } + + public Path getArchivePath() { + return archivePath; + } + + public synchronized ValidatableFunctionPackage getFunctionPackage() { + if (closed) { + throw new IllegalStateException("FunctionArchive is already closed"); + } + if (functionPackage == null) { + functionPackage = new FunctionFilePackage(archivePath.toFile(), narExtractionDirectory, enableClassloading, + FunctionDefinition.class); + } + return functionPackage; + } + + public FunctionDefinition getFunctionDefinition() { + return functionDefinition; + } + + @Override + public synchronized void close() throws Exception { + closed = true; + if (functionPackage instanceof AutoCloseable) { + ((AutoCloseable) functionPackage).close(); + } + } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java 
b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java index 941df573e495e..31a5540e0bfaf 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/functions/FunctionUtils.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.apache.pulsar.functions.utils.functions; import java.io.File; @@ -30,10 +31,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.functions.FunctionDefinition; import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; import org.apache.pulsar.common.util.ObjectMapperFactory; -import org.apache.pulsar.functions.api.Function; -import org.apache.pulsar.functions.utils.Exceptions; +import org.zeroturnaround.zip.ZipUtil; @UtilityClass @@ -45,43 +44,40 @@ public class FunctionUtils { /** * Extract the Pulsar Function class from a function or archive. 
*/ - public static String getFunctionClass(ClassLoader classLoader) throws IOException { - NarClassLoader ncl = (NarClassLoader) classLoader; - String configStr = ncl.getServiceDefinition(PULSAR_IO_SERVICE_NAME); - - FunctionDefinition conf = ObjectMapperFactory.getYamlMapper().reader().readValue(configStr, - FunctionDefinition.class); - if (StringUtils.isEmpty(conf.getFunctionClass())) { - throw new IOException( - String.format("The '%s' functionctor does not provide a function implementation", conf.getName())); - } + public static String getFunctionClass(File narFile) throws IOException { + return getFunctionDefinition(narFile).getFunctionClass(); + } - try { - // Try to load source class and check it implements Function interface - Class functionClass = ncl.loadClass(conf.getFunctionClass()); - if (!(Function.class.isAssignableFrom(functionClass))) { - throw new IOException( - "Class " + conf.getFunctionClass() + " does not implement interface " + Function.class - .getName()); - } - } catch (Throwable t) { - Exceptions.rethrowIOException(t); + public static FunctionDefinition getFunctionDefinition(File narFile) throws IOException { + return getPulsarIOServiceConfig(narFile, FunctionDefinition.class); + } + + public static T getPulsarIOServiceConfig(File narFile, Class valueType) throws IOException { + String filename = "META-INF/services/" + PULSAR_IO_SERVICE_NAME; + byte[] configEntry = ZipUtil.unpackEntry(narFile, filename); + if (configEntry != null) { + return ObjectMapperFactory.getYamlMapper().reader().readValue(configEntry, valueType); + } else { + return null; } + } - return conf.getFunctionClass(); + public static String getFunctionClass(NarClassLoader narClassLoader) throws IOException { + return getFunctionDefinition(narClassLoader).getFunctionClass(); } public static FunctionDefinition getFunctionDefinition(NarClassLoader narClassLoader) throws IOException { - String configStr = narClassLoader.getServiceDefinition(PULSAR_IO_SERVICE_NAME); - return 
ObjectMapperFactory.getYamlMapper().reader().readValue(configStr, FunctionDefinition.class); + return getPulsarIOServiceConfig(narClassLoader, FunctionDefinition.class); } - public static TreeMap searchForFunctions(String functionsDirectory) throws IOException { - return searchForFunctions(functionsDirectory, false); + public static T getPulsarIOServiceConfig(NarClassLoader narClassLoader, Class valueType) throws IOException { + return ObjectMapperFactory.getYamlMapper().reader() + .readValue(narClassLoader.getServiceDefinition(PULSAR_IO_SERVICE_NAME), valueType); } public static TreeMap searchForFunctions(String functionsDirectory, - boolean alwaysPopulatePath) throws IOException { + String narExtractionDirectory, + boolean enableClassloading) throws IOException { Path path = Paths.get(functionsDirectory).toAbsolutePath(); log.info("Searching for functions in {}", path); @@ -95,22 +91,12 @@ public static TreeMap searchForFunctions(String functio try (DirectoryStream stream = Files.newDirectoryStream(path, "*.nar")) { for (Path archive : stream) { try { - - NarClassLoader ncl = NarClassLoaderBuilder.builder() - .narFile(new File(archive.toString())) - .build(); - - FunctionArchive.FunctionArchiveBuilder functionArchiveBuilder = FunctionArchive.builder(); - FunctionDefinition cntDef = FunctionUtils.getFunctionDefinition(ncl); + FunctionDefinition cntDef = FunctionUtils.getFunctionDefinition(archive.toFile()); log.info("Found function {} from {}", cntDef, archive); - - functionArchiveBuilder.archivePath(archive); - - functionArchiveBuilder.classLoader(ncl); - functionArchiveBuilder.functionDefinition(cntDef); - - if (alwaysPopulatePath || !StringUtils.isEmpty(cntDef.getFunctionClass())) { - functions.put(cntDef.getName(), functionArchiveBuilder.build()); + if (!StringUtils.isEmpty(cntDef.getFunctionClass())) { + FunctionArchive functionArchive = + new FunctionArchive(archive, cntDef, narExtractionDirectory, enableClassloading); + functions.put(cntDef.getName(), 
functionArchive); } } catch (Throwable t) { log.warn("Failed to load function from {}", archive, t); diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java index f1a03f4424ec6..5fcc22747c516 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/Connector.java @@ -20,17 +20,79 @@ import java.nio.file.Path; import java.util.List; -import lombok.Builder; -import lombok.Data; +import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.io.ConfigFieldDefinition; import org.apache.pulsar.common.io.ConnectorDefinition; +import org.apache.pulsar.functions.utils.FunctionFilePackage; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; -@Builder -@Data -public class Connector { - private Path archivePath; +public class Connector implements AutoCloseable { + private final Path archivePath; + private final String narExtractionDirectory; + private final boolean enableClassloading; + private ValidatableFunctionPackage connectorFunctionPackage; private List sourceConfigFieldDefinitions; private List sinkConfigFieldDefinitions; - private ClassLoader classLoader; private ConnectorDefinition connectorDefinition; + private boolean closed; + + public Connector(Path archivePath, ConnectorDefinition connectorDefinition, String narExtractionDirectory, + boolean enableClassloading) { + this.archivePath = archivePath; + this.connectorDefinition = connectorDefinition; + this.narExtractionDirectory = narExtractionDirectory; + this.enableClassloading = enableClassloading; + } + + public Path getArchivePath() { + return archivePath; + } + + public synchronized ValidatableFunctionPackage getConnectorFunctionPackage() { + checkState(); + if (connectorFunctionPackage == null) { + 
connectorFunctionPackage = + new FunctionFilePackage(archivePath.toFile(), narExtractionDirectory, enableClassloading, + ConnectorDefinition.class); + } + return connectorFunctionPackage; + } + + private void checkState() { + if (closed) { + throw new IllegalStateException("Connector is already closed"); + } + } + + public synchronized List getSourceConfigFieldDefinitions() { + checkState(); + if (sourceConfigFieldDefinitions == null && !StringUtils.isEmpty(connectorDefinition.getSourceClass()) + && !StringUtils.isEmpty(connectorDefinition.getSourceConfigClass())) { + sourceConfigFieldDefinitions = ConnectorUtils.getConnectorConfigDefinition(getConnectorFunctionPackage(), + connectorDefinition.getSourceConfigClass()); + } + return sourceConfigFieldDefinitions; + } + + public synchronized List getSinkConfigFieldDefinitions() { + checkState(); + if (sinkConfigFieldDefinitions == null && !StringUtils.isEmpty(connectorDefinition.getSinkClass()) + && !StringUtils.isEmpty(connectorDefinition.getSinkConfigClass())) { + sinkConfigFieldDefinitions = ConnectorUtils.getConnectorConfigDefinition(getConnectorFunctionPackage(), + connectorDefinition.getSinkConfigClass()); + } + return sinkConfigFieldDefinitions; + } + + public ConnectorDefinition getConnectorDefinition() { + return connectorDefinition; + } + + @Override + public synchronized void close() throws Exception { + closed = true; + if (connectorFunctionPackage instanceof AutoCloseable) { + ((AutoCloseable) connectorFunctionPackage).close(); + } + } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java index a814bf35548f3..df1310965f392 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/io/ConnectorUtils.java @@ -18,38 +18,31 @@ */ package 
org.apache.pulsar.functions.utils.io; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.File; import java.io.IOException; -import java.lang.annotation.Annotation; -import java.lang.reflect.Field; -import java.lang.reflect.Method; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.AbstractMap; -import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.stream.Collectors; import lombok.experimental.UtilityClass; import lombok.extern.slf4j.Slf4j; +import net.bytebuddy.description.annotation.AnnotationDescription; +import net.bytebuddy.description.annotation.AnnotationValue; +import net.bytebuddy.description.field.FieldDescription; +import net.bytebuddy.description.method.MethodDescription; +import net.bytebuddy.description.type.TypeDefinition; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.io.ConfigFieldDefinition; import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; -import org.apache.pulsar.common.util.FutureUtil; -import org.apache.pulsar.common.util.ObjectMapperFactory; -import org.apache.pulsar.common.util.Reflections; import org.apache.pulsar.functions.utils.Exceptions; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; import org.apache.pulsar.io.core.BatchSource; import org.apache.pulsar.io.core.Sink; import org.apache.pulsar.io.core.Source; @@ -76,7 +69,7 @@ public static String getIOSourceClass(NarClassLoader narClassLoader) throws IOEx Class sourceClass = 
narClassLoader.loadClass(conf.getSourceClass()); if (!(Source.class.isAssignableFrom(sourceClass) || BatchSource.class.isAssignableFrom(sourceClass))) { throw new IOException(String.format("Class %s does not implement interface %s or %s", - conf.getSourceClass(), Source.class.getName(), BatchSource.class.getName())); + conf.getSourceClass(), Source.class.getName(), BatchSource.class.getName())); } } catch (Throwable t) { Exceptions.rethrowIOException(t); @@ -109,32 +102,36 @@ public static String getIOSinkClass(NarClassLoader narClassLoader) throws IOExce return conf.getSinkClass(); } - public static ConnectorDefinition getConnectorDefinition(NarClassLoader narClassLoader) throws IOException { - String configStr = narClassLoader.getServiceDefinition(PULSAR_IO_SERVICE_NAME); + public static ConnectorDefinition getConnectorDefinition(File narFile) throws IOException { + return FunctionUtils.getPulsarIOServiceConfig(narFile, ConnectorDefinition.class); + } - return ObjectMapperFactory.getYamlMapper().reader().readValue(configStr, ConnectorDefinition.class); + public static ConnectorDefinition getConnectorDefinition(NarClassLoader narClassLoader) throws IOException { + return FunctionUtils.getPulsarIOServiceConfig(narClassLoader, ConnectorDefinition.class); } - public static List getConnectorConfigDefinition(ClassLoader classLoader, - String configClassName) throws Exception { + public static List getConnectorConfigDefinition( + ValidatableFunctionPackage connectorFunctionPackage, + String configClassName) { List retval = new LinkedList<>(); - Class configClass = classLoader.loadClass(configClassName); - for (Field field : Reflections.getAllFields(configClass)) { - if (java.lang.reflect.Modifier.isStatic(field.getModifiers())) { - // We dont want static fields + TypeDefinition configClass = connectorFunctionPackage.resolveType(configClassName); + + for (FieldDescription field : getAllFields(configClass)) { + if (field.isStatic()) { + // We don't want static fields 
continue; } - field.setAccessible(true); ConfigFieldDefinition configFieldDefinition = new ConfigFieldDefinition(); configFieldDefinition.setFieldName(field.getName()); - configFieldDefinition.setTypeName(field.getType().getName()); + configFieldDefinition.setTypeName(field.getType().getActualName()); Map attributes = new HashMap<>(); - for (Annotation annotation : field.getAnnotations()) { - if (annotation.annotationType().equals(FieldDoc.class)) { - FieldDoc fieldDoc = (FieldDoc) annotation; - for (Method method : FieldDoc.class.getDeclaredMethods()) { - Object value = method.invoke(fieldDoc); - attributes.put(method.getName(), value == null ? "" : value.toString()); + for (AnnotationDescription annotation : field.getDeclaredAnnotations()) { + if (annotation.getAnnotationType().represents(FieldDoc.class)) { + for (MethodDescription.InDefinedShape method : annotation.getAnnotationType() + .getDeclaredMethods()) { + AnnotationValue value = annotation.getValue(method.getName()); + attributes.put(method.getName(), + value == null || value.resolve() == null ? 
"" : value.resolve().toString()); } } } @@ -145,86 +142,42 @@ public static List getConnectorConfigDefinition(ClassLoad return retval; } + private static List getAllFields(TypeDefinition type) { + List fields = new LinkedList<>(); + fields.addAll(type.getDeclaredFields()); + + if (type.getSuperClass() != null) { + fields.addAll(getAllFields(type.getSuperClass())); + } + + return fields; + } + public static TreeMap searchForConnectors(String connectorsDirectory, - String narExtractionDirectory) throws IOException { + String narExtractionDirectory, + boolean enableClassloading) throws IOException { Path path = Paths.get(connectorsDirectory).toAbsolutePath(); log.info("Searching for connectors in {}", path); + TreeMap connectors = new TreeMap<>(); + if (!path.toFile().exists()) { log.warn("Connectors archive directory not found"); - return new TreeMap<>(); + return connectors; } - List archives = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(path, "*.nar")) { for (Path archive : stream) { - archives.add(archive); - } - } - if (archives.isEmpty()) { - return new TreeMap<>(); - } - - ExecutorService oneTimeExecutor = null; - try { - int nThreads = Math.min(Runtime.getRuntime().availableProcessors(), archives.size()); - log.info("Loading {} connector definitions with a thread pool of size {}", archives.size(), nThreads); - oneTimeExecutor = Executors.newFixedThreadPool(nThreads, - new ThreadFactoryBuilder().setNameFormat("connector-extraction-executor-%d").build()); - List>> futures = new ArrayList<>(); - for (Path archive : archives) { - CompletableFuture> future = CompletableFuture.supplyAsync(() -> - getConnectorDefinitionEntry(archive, narExtractionDirectory), oneTimeExecutor); - futures.add(future); - } - - FutureUtil.waitForAll(futures).join(); - return futures.stream() - .map(CompletableFuture::join) - .filter(entry -> entry != null) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (a, b) -> a, TreeMap::new)); - } 
finally { - if (oneTimeExecutor != null) { - oneTimeExecutor.shutdown(); - } - } - } - - private static Map.Entry getConnectorDefinitionEntry(Path archive, - String narExtractionDirectory) { - try { - - NarClassLoader ncl = NarClassLoaderBuilder.builder() - .narFile(new File(archive.toString())) - .extractionDirectory(narExtractionDirectory) - .build(); - - Connector.ConnectorBuilder connectorBuilder = Connector.builder(); - ConnectorDefinition cntDef = ConnectorUtils.getConnectorDefinition(ncl); - log.info("Found connector {} from {}", cntDef, archive); - - connectorBuilder.archivePath(archive); - if (!StringUtils.isEmpty(cntDef.getSourceClass())) { - if (!StringUtils.isEmpty(cntDef.getSourceConfigClass())) { - connectorBuilder.sourceConfigFieldDefinitions( - ConnectorUtils.getConnectorConfigDefinition(ncl, - cntDef.getSourceConfigClass())); + try { + ConnectorDefinition cntDef = ConnectorUtils.getConnectorDefinition(archive.toFile()); + log.info("Found connector {} from {}", cntDef, archive); + Connector connector = new Connector(archive, cntDef, narExtractionDirectory, enableClassloading); + connectors.put(cntDef.getName(), connector); + } catch (Throwable t) { + log.warn("Failed to load connector from {}", archive, t); } } - - if (!StringUtils.isEmpty(cntDef.getSinkClass())) { - if (!StringUtils.isEmpty(cntDef.getSinkConfigClass())) { - connectorBuilder.sinkConfigFieldDefinitions( - ConnectorUtils.getConnectorConfigDefinition(ncl, cntDef.getSinkConfigClass())); - } - } - - connectorBuilder.classLoader(ncl); - connectorBuilder.connectorDefinition(cntDef); - return new AbstractMap.SimpleEntry(cntDef.getName(), connectorBuilder.build()); - } catch (Throwable t) { - log.warn("Failed to load connector from {}", archive, t); - return null; } + return connectors; } } diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java 
index 131f153b08d68..90fdd4da777d3 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionCommonTest.java @@ -30,16 +30,17 @@ import com.github.tomakehurst.wiremock.WireMockServer; import java.io.File; import java.util.Collection; +import java.util.concurrent.CompletableFuture; import lombok.Cleanup; +import net.bytebuddy.description.type.TypeDefinition; +import net.bytebuddy.pool.TypePool; import org.apache.pulsar.client.impl.MessageIdImpl; -import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.functions.api.Context; import org.apache.pulsar.functions.api.Function; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.api.WindowContext; import org.apache.pulsar.functions.api.WindowFunction; import org.assertj.core.util.Files; -import org.testng.Assert; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -47,41 +48,6 @@ * Unit test of {@link Exceptions}. */ public class FunctionCommonTest { - - @Test - public void testValidateLocalFileUrl() throws Exception { - String fileLocation = FutureUtil.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - try { - // eg: fileLocation : /dir/fileName.jar (invalid) - FunctionCommon.extractClassLoader(fileLocation); - Assert.fail("should fail with invalid url: without protocol"); - } catch (IllegalArgumentException ie) { - // Ok.. 
expected exception - } - String fileLocationWithProtocol = "file://" + fileLocation; - // eg: fileLocation : file:///dir/fileName.jar (valid) - FunctionCommon.extractClassLoader(fileLocationWithProtocol); - // eg: fileLocation : file:/dir/fileName.jar (valid) - fileLocationWithProtocol = "file:" + fileLocation; - FunctionCommon.extractClassLoader(fileLocationWithProtocol); - } - - @Test - public void testValidateHttpFileUrl() throws Exception { - - String jarHttpUrl = "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; - FunctionCommon.extractClassLoader(jarHttpUrl); - - jarHttpUrl = "http://_invalidurl_.com"; - try { - // eg: fileLocation : /dir/fileName.jar (invalid) - FunctionCommon.extractClassLoader(jarHttpUrl); - Assert.fail("should fail with invalid url: without protocol"); - } catch (Exception ie) { - // Ok.. expected exception - } - } - @Test public void testDownloadFile() throws Exception { final String jarHttpUrl = "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; @@ -150,6 +116,14 @@ public Record process(String input, Context context) throws Exception { } }, false }, + { + new Function>() { + @Override + public CompletableFuture process(String input, Context context) throws Exception { + return null; + } + }, false + }, { new java.util.function.Function() { @Override @@ -166,6 +140,14 @@ public Record apply(String s) { } }, false }, + { + new java.util.function.Function>() { + @Override + public CompletableFuture apply(String s) { + return null; + } + }, false + }, { new WindowFunction() { @Override @@ -182,6 +164,14 @@ public Record process(Collection> input, WindowContext c } }, true }, + { + new WindowFunction>() { + @Override + public CompletableFuture process(Collection> input, WindowContext context) throws Exception { + return null; + } + }, true + }, { new java.util.function.Function, Integer>() { @Override @@ -197,15 +187,26 @@ public Record 
apply(Collection strings) { return null; } }, true + }, + { + new java.util.function.Function, CompletableFuture>() { + @Override + public CompletableFuture apply(Collection strings) { + return null; + } + }, true } }; } @Test(dataProvider = "function") public void testGetFunctionTypes(Object function, boolean isWindowConfigPresent) { - Class[] types = FunctionCommon.getFunctionTypes(function.getClass(), isWindowConfigPresent); + TypePool typePool = TypePool.Default.of(function.getClass().getClassLoader()); + TypeDefinition[] types = + FunctionCommon.getFunctionTypes(typePool.describe(function.getClass().getName()).resolve(), + isWindowConfigPresent); assertEquals(types.length, 2); - assertEquals(types[0], String.class); - assertEquals(types[1], Integer.class); + assertEquals(types[0].asErasure().getTypeName(), String.class.getName()); + assertEquals(types[1].asErasure().getTypeName(), Integer.class.getName()); } } diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java index 8b4470d8c76c7..954eef44a7366 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/FunctionConfigUtilsTest.java @@ -18,13 +18,24 @@ */ package org.apache.pulsar.functions.utils; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; +import static org.apache.pulsar.common.functions.FunctionConfig.Runtime.PYTHON; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; import com.google.gson.Gson; - -import org.apache.pulsar.client.api.CompressionType; -import 
org.apache.pulsar.client.api.SubscriptionInitialPosition; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.util.JsonFormat; +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.client.api.CompressionType; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.impl.schema.JSONSchema; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; @@ -32,28 +43,29 @@ import org.apache.pulsar.common.functions.Resources; import org.apache.pulsar.common.functions.WindowConfig; import org.apache.pulsar.common.util.Reflections; +import org.apache.pulsar.functions.api.Record; +import org.apache.pulsar.functions.api.WindowContext; +import org.apache.pulsar.functions.api.WindowFunction; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; import org.testng.annotations.Test; -import java.lang.reflect.Field; -import java.util.HashMap; -import java.util.Map; - -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; -import static org.apache.pulsar.common.functions.FunctionConfig.Runtime.PYTHON; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertNull; -import static org.testng.Assert.assertThrows; -import static org.testng.Assert.assertTrue; - /** * Unit test of {@link Reflections}. 
*/ @Slf4j public class FunctionConfigUtilsTest { + public static class WordCountWindowFunction implements WindowFunction { + @Override + public Void process(Collection> inputs, WindowContext context) throws Exception { + for (Record input : inputs) { + Arrays.asList(input.getValue().split("\\.")).forEach(word -> context.incrCounter(word, 1)); + } + return null; + } + } + @Test public void testAutoAckConvertFailed() { @@ -63,7 +75,7 @@ public void testAutoAckConvertFailed() { functionConfig.setProcessingGuarantees(FunctionConfig.ProcessingGuarantees.ATMOST_ONCE); assertThrows(IllegalArgumentException.class, () -> { - FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + FunctionConfigUtils.convert(functionConfig); }); } @@ -99,7 +111,7 @@ public void testConvertBackFidelity() { producerConfig.setBatchBuilder("DEFAULT"); producerConfig.setCompressionType(CompressionType.ZLIB); functionConfig.setProducerConfig(producerConfig); - Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); // add default resources @@ -119,7 +131,7 @@ public void testConvertWindow() { functionConfig.setNamespace("test-namespace"); functionConfig.setName("test-function"); functionConfig.setParallelism(1); - functionConfig.setClassName(IdentityFunction.class.getName()); + functionConfig.setClassName(WordCountWindowFunction.class.getName()); Map inputSpecs = new HashMap<>(); inputSpecs.put("test-input", ConsumerConfig.builder().isRegexPattern(true).serdeClassName("test-serde").build()); functionConfig.setInputSpecs(inputSpecs); @@ -141,7 +153,7 @@ public void testConvertWindow() { producerConfig.setBatchBuilder("KEY_BASED"); producerConfig.setCompressionType(CompressionType.SNAPPY); functionConfig.setProducerConfig(producerConfig); - 
Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); // WindowsFunction guarantees convert to FunctionGuarantees. @@ -158,6 +170,18 @@ public void testConvertWindow() { ); } + @Test + public void testConvertBatchBuilder() { + FunctionConfig functionConfig = createFunctionConfig(); + functionConfig.setBatchBuilder("KEY_BASED"); + + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); + assertEquals(functionDetails.getSink().getProducerSpec().getBatchBuilder(), "KEY_BASED"); + + FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); + assertEquals(convertedConfig.getProducerConfig().getBatchBuilder(), "KEY_BASED"); + } + @Test public void testMergeEqual() { FunctionConfig functionConfig = createFunctionConfig(); @@ -429,6 +453,30 @@ public void testMergeDifferentWindowConfig() { ); } + @Test + public void testMergeDifferentProducerConfig() { + FunctionConfig functionConfig = createFunctionConfig(); + + ProducerConfig producerConfig = new ProducerConfig(); + producerConfig.setMaxPendingMessages(100); + producerConfig.setMaxPendingMessagesAcrossPartitions(1000); + producerConfig.setUseThreadLocalProducers(true); + producerConfig.setBatchBuilder("DEFAULT"); + producerConfig.setCompressionType(CompressionType.ZLIB); + FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("producerConfig", producerConfig); + + FunctionConfig mergedConfig = FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig); + assertEquals( + mergedConfig.getProducerConfig(), + producerConfig + ); + mergedConfig.setProducerConfig(functionConfig.getProducerConfig()); + assertEquals( + new Gson().toJson(functionConfig), + new Gson().toJson(mergedConfig) + ); + } + @Test public void 
testMergeDifferentTimeout() { FunctionConfig functionConfig = createFunctionConfig(); @@ -483,7 +531,6 @@ private FunctionConfig createFunctionConfig() { functionConfig.setUserConfig(new HashMap<>()); functionConfig.setAutoAck(true); functionConfig.setTimeoutMs(2000L); - functionConfig.setWindowConfig(new WindowConfig().setWindowLengthCount(10)); functionConfig.setCleanupSubscription(true); functionConfig.setRuntimeFlags("-Dfoo=bar"); return functionConfig; @@ -517,7 +564,7 @@ public void testDisableForwardSourceMessageProperty() throws InvalidProtocolBuff config.setForwardSourceMessageProperty(true); FunctionConfigUtils.inferMissingArguments(config, false); assertNull(config.getForwardSourceMessageProperty()); - FunctionDetails details = FunctionConfigUtils.convert(config, FunctionConfigUtilsTest.class.getClassLoader()); + FunctionDetails details = FunctionConfigUtils.convert(config); assertFalse(details.getSink().getForwardSourceMessageProperty()); String detailsJson = "'" + JsonFormat.printer().omittingInsignificantWhitespace().print(details) + "'"; log.info("Function details : {}", detailsJson); @@ -604,7 +651,7 @@ public void testMergeDifferentOutputSchemaTypes() { @Test public void testPoolMessages() { FunctionConfig functionConfig = createFunctionConfig(); - Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + Function.FunctionDetails functionDetails = FunctionConfigUtils.convert(functionConfig); assertFalse(functionDetails.getSource().getInputSpecsMap().get("test-input").getPoolMessages()); FunctionConfig convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); assertFalse(convertedConfig.getInputSpecs().get("test-input").isPoolMessages()); @@ -614,7 +661,7 @@ public void testPoolMessages() { .poolMessages(true).build()); functionConfig.setInputSpecs(inputSpecs); - functionDetails = FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + functionDetails = 
FunctionConfigUtils.convert(functionConfig); assertTrue(functionDetails.getSource().getInputSpecsMap().get("test-input").getPoolMessages()); convertedConfig = FunctionConfigUtils.convertFromDetails(functionDetails); diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java index 62d6f68d31b2c..14cd77f60ff95 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SinkConfigUtilsTest.java @@ -18,37 +18,38 @@ */ package org.apache.pulsar.functions.utils; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATMOST_ONCE; +import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; import com.google.common.collect.Lists; import com.google.gson.Gson; +import java.io.IOException; +import java.lang.reflect.Field; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import lombok.Data; import lombok.NoArgsConstructor; import lombok.experimental.Accessors; import org.apache.pulsar.common.functions.ConsumerConfig; import org.apache.pulsar.common.functions.FunctionConfig; import org.apache.pulsar.common.functions.Resources; +import org.apache.pulsar.common.io.ConnectorDefinition; import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.config.validation.ConfigValidationAnnotations; -import 
org.apache.pulsar.functions.api.utils.IdentityFunction; +import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.proto.Function; +import org.apache.pulsar.io.core.Sink; +import org.apache.pulsar.io.core.SinkContext; import org.testng.annotations.Test; -import java.io.IOException; -import java.lang.reflect.Field; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE; -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.ATMOST_ONCE; -import static org.apache.pulsar.common.functions.FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertThrows; -import static org.testng.Assert.assertTrue; -import static org.testng.Assert.expectThrows; - /** * Unit test of {@link SinkConfigUtilsTest}. */ @@ -62,6 +63,27 @@ public static class TestSinkConfig { private String configParameter; } + + public static class NopSink implements Sink { + + @Override + public void open(Map config, SinkContext sinkContext) throws Exception { + + } + + @Override + public void write(Record record) throws Exception { + + } + + @Override + public void close() throws Exception { + + } + } + + + @Test public void testAutoAckConvertFailed() throws IOException { @@ -151,6 +173,7 @@ public void testParseRetainOrderingField() throws IOException { SinkConfig sinkConfig = createSinkConfig(); sinkConfig.setRetainOrdering(testcase); Function.FunctionDetails functionDetails = SinkConfigUtils.convert(sinkConfig, new SinkConfigUtils.ExtractedSinkDetails(null, null, null)); + assertEquals(functionDetails.getRetainOrdering(), testcase != null ? 
testcase : Boolean.valueOf(false)); SinkConfig result = SinkConfigUtils.convertFromDetails(functionDetails); assertEquals(result.getRetainOrdering(), testcase != null ? testcase : Boolean.valueOf(false)); } @@ -163,6 +186,7 @@ public void testParseKeyRetainOrderingField() throws IOException { SinkConfig sinkConfig = createSinkConfig(); sinkConfig.setRetainKeyOrdering(testcase); Function.FunctionDetails functionDetails = SinkConfigUtils.convert(sinkConfig, new SinkConfigUtils.ExtractedSinkDetails(null, null, null)); + assertEquals(functionDetails.getRetainKeyOrdering(), testcase != null ? testcase : Boolean.valueOf(false)); SinkConfig result = SinkConfigUtils.convertFromDetails(functionDetails); assertEquals(result.getRetainKeyOrdering(), testcase != null ? testcase : Boolean.valueOf(false)); } @@ -519,7 +543,7 @@ private SinkConfig createSinkConfig() { sinkConfig.setNamespace("test-namespace"); sinkConfig.setName("test-sink"); sinkConfig.setParallelism(1); - sinkConfig.setClassName(IdentityFunction.class.getName()); + sinkConfig.setClassName(NopSink.class.getName()); Map inputSpecs = new HashMap<>(); inputSpecs.put("test-input", ConsumerConfig.builder().isRegexPattern(true).serdeClassName("test-serde").build()); sinkConfig.setInputSpecs(inputSpecs); @@ -575,13 +599,16 @@ public void testAllowDisableSinkTimeout() { SinkConfig sinkConfig = createSinkConfig(); sinkConfig.setInputSpecs(null); sinkConfig.setTopicsPattern("my-topic-*"); - SinkConfigUtils.validateAndExtractDetails(sinkConfig, this.getClass().getClassLoader(), null, + LoadedFunctionPackage validatableFunction = + new LoadedFunctionPackage(this.getClass().getClassLoader(), ConnectorDefinition.class); + + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunction, null, true); sinkConfig.setTimeoutMs(null); - SinkConfigUtils.validateAndExtractDetails(sinkConfig, this.getClass().getClassLoader(), null, + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunction, null, true); 
sinkConfig.setTimeoutMs(0L); - SinkConfigUtils.validateAndExtractDetails(sinkConfig, this.getClass().getClassLoader(), null, + SinkConfigUtils.validateAndExtractDetails(sinkConfig, validatableFunction, null, true); } } diff --git a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java index 49313dbf02c62..a4da4203d9641 100644 --- a/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java +++ b/pulsar-functions/utils/src/test/java/org/apache/pulsar/functions/utils/SourceConfigUtilsTest.java @@ -285,6 +285,30 @@ public void testMergeDifferentBatchSourceConfig() { ); } + @Test + public void testMergeDifferentProducerConfig() { + SourceConfig sourceConfig = createSourceConfig(); + + ProducerConfig producerConfig = new ProducerConfig(); + producerConfig.setMaxPendingMessages(100); + producerConfig.setMaxPendingMessagesAcrossPartitions(1000); + producerConfig.setUseThreadLocalProducers(true); + producerConfig.setBatchBuilder("DEFAULT"); + producerConfig.setCompressionType(CompressionType.ZLIB); + SourceConfig newSourceConfig = createUpdatedSourceConfig("producerConfig", producerConfig); + + SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig); + assertEquals( + mergedConfig.getProducerConfig(), + producerConfig + ); + mergedConfig.setProducerConfig(sourceConfig.getProducerConfig()); + assertEquals( + new Gson().toJson(sourceConfig), + new Gson().toJson(mergedConfig) + ); + } + @Test public void testValidateConfig() { SourceConfig sourceConfig = createSourceConfig(); diff --git a/pulsar-functions/worker/pom.xml b/pulsar-functions/worker/pom.xml index c6db572f57d7e..cbfd2bf37aac0 100644 --- a/pulsar-functions/worker/pom.xml +++ b/pulsar-functions/worker/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-functions 
3.1.0-SNAPSHOT .. - pulsar-functions-worker Pulsar Functions :: Worker - - ${project.groupId} pulsar-broker-common ${project.version} - ${project.groupId} pulsar-functions-runtime ${project.version} - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-client-admin-original ${project.version} - org.glassfish.jersey.media jersey-media-json-jackson - jakarta.activation jakarta.activation-api - org.glassfish.jersey.core jersey-server - org.glassfish.jersey.containers jersey-container-servlet - org.glassfish.jersey.containers jersey-container-servlet-core - org.glassfish.jersey.media jersey-media-multipart - org.eclipse.jetty jetty-server - org.eclipse.jetty jetty-alpn-conscrypt-server - org.eclipse.jetty jetty-servlet - org.eclipse.jetty jetty-servlets - org.apache.distributedlog distributedlog-core @@ -118,17 +100,14 @@ - org.apache.bookkeeper bookkeeper-server - commons-io commons-io - ${project.groupId} pulsar-docs-tools @@ -140,7 +119,6 @@ - io.swagger swagger-core @@ -156,14 +134,11 @@ - io.prometheus simpleclient_jetty - - ${project.groupId} pulsar-io-cassandra @@ -171,7 +146,6 @@ pom test - ${project.groupId} pulsar-io-twitter @@ -179,7 +153,6 @@ pom test - ${project.groupId} pulsar-functions-api-examples @@ -187,7 +160,6 @@ pom test - ${project.groupId} pulsar-functions-api-examples-builtin @@ -195,9 +167,7 @@ pom test - - @@ -217,7 +187,6 @@ - maven-dependency-plugin @@ -270,7 +239,6 @@ - org.apache.maven.plugins maven-surefire-plugin @@ -286,7 +254,6 @@ - org.apache.maven.plugins @@ -334,5 +301,4 @@ - diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java index 03c6eb7921840..389051fce4217 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java +++ 
b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java @@ -70,6 +70,7 @@ import org.apache.pulsar.functions.utils.Actions; import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.SourceConfigUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.io.Connector; @Data @@ -82,18 +83,21 @@ public class FunctionActioner { private final ConnectorsManager connectorsManager; private final FunctionsManager functionsManager; private final PulsarAdmin pulsarAdmin; + private final PackageUrlValidator packageUrlValidator; public FunctionActioner(WorkerConfig workerConfig, RuntimeFactory runtimeFactory, Namespace dlogNamespace, ConnectorsManager connectorsManager, - FunctionsManager functionsManager, PulsarAdmin pulsarAdmin) { + FunctionsManager functionsManager, PulsarAdmin pulsarAdmin, + PackageUrlValidator packageUrlValidator) { this.workerConfig = workerConfig; this.runtimeFactory = runtimeFactory; this.dlogNamespace = dlogNamespace; this.connectorsManager = connectorsManager; this.functionsManager = functionsManager; this.pulsarAdmin = pulsarAdmin; + this.packageUrlValidator = packageUrlValidator; } @@ -151,6 +155,9 @@ private String getPackageFile(FunctionMetaData functionMetaData, FunctionDetails boolean isPkgUrlProvided = isFunctionPackageUrlSupported(packagePath); String packageFile; if (isPkgUrlProvided && packagePath.startsWith(FILE)) { + if (!packageUrlValidator.isValidPackageUrl(componentType, packagePath)) { + throw new IllegalArgumentException("Package URL " + packagePath + " is not valid"); + } URL url = new URL(packagePath); File pkgFile = new File(url.toURI()); packageFile = pkgFile.getAbsolutePath(); @@ -167,7 +174,7 @@ private String getPackageFile(FunctionMetaData functionMetaData, FunctionDetails pkgDir, new File(getDownloadFileName(functionMetaData.getFunctionDetails(), pkgLocation)).getName()); - 
downloadFile(pkgFile, isPkgUrlProvided, functionMetaData, instanceId, pkgLocation); + downloadFile(pkgFile, isPkgUrlProvided, functionMetaData, instanceId, pkgLocation, componentType); packageFile = pkgFile.getAbsolutePath(); } return packageFile; @@ -226,7 +233,8 @@ InstanceConfig createInstanceConfig(FunctionDetails functionDetails, Function.Fu } private void downloadFile(File pkgFile, boolean isPkgUrlProvided, FunctionMetaData functionMetaData, - int instanceId, Function.PackageLocationMetaData pkgLocation) + int instanceId, Function.PackageLocationMetaData pkgLocation, + FunctionDetails.ComponentType componentType) throws IOException, PulsarAdminException { FunctionDetails details = functionMetaData.getFunctionDetails(); @@ -251,6 +259,9 @@ private void downloadFile(File pkgFile, boolean isPkgUrlProvided, FunctionMetaDa downloadFromHttp ? pkgLocationPath : pkgLocation); if (downloadFromHttp) { + if (!packageUrlValidator.isValidPackageUrl(componentType, pkgLocationPath)) { + throw new IllegalArgumentException("Package URL " + pkgLocationPath + " is not valid"); + } FunctionCommon.downloadFromHttpUrl(pkgLocationPath, tempPkgFile); } else if (downloadFromPackageManagementService) { getPulsarAdmin().packages().download(pkgLocationPath, tempPkgFile.getPath()); @@ -527,7 +538,7 @@ private File getBuiltinArchive(FunctionDetails.ComponentType componentType, Func builder.setClassName(sourceClass); functionDetails.setSource(builder); - fillSourceTypeClass(functionDetails, connector.getClassLoader(), sourceClass); + fillSourceTypeClass(functionDetails, connector.getConnectorFunctionPackage(), sourceClass); return archive; } } @@ -543,7 +554,7 @@ private File getBuiltinArchive(FunctionDetails.ComponentType componentType, Func builder.setClassName(sinkClass); functionDetails.setSink(builder); - fillSinkTypeClass(functionDetails, connector.getClassLoader(), sinkClass); + fillSinkTypeClass(functionDetails, connector.getConnectorFunctionPackage(), sinkClass); return archive; } 
} @@ -557,8 +568,8 @@ private File getBuiltinArchive(FunctionDetails.ComponentType componentType, Func } private void fillSourceTypeClass(FunctionDetails.Builder functionDetails, - ClassLoader narClassLoader, String className) throws ClassNotFoundException { - String typeArg = getSourceType(className, narClassLoader).getName(); + ValidatableFunctionPackage functionPackage, String className) { + String typeArg = getSourceType(className, functionPackage.getTypePool()).asErasure().getName(); SourceSpec.Builder sourceBuilder = SourceSpec.newBuilder(functionDetails.getSource()); sourceBuilder.setTypeClassName(typeArg); @@ -573,8 +584,8 @@ private void fillSourceTypeClass(FunctionDetails.Builder functionDetails, } private void fillSinkTypeClass(FunctionDetails.Builder functionDetails, - ClassLoader narClassLoader, String className) throws ClassNotFoundException { - String typeArg = getSinkType(className, narClassLoader).getName(); + ValidatableFunctionPackage functionPackage, String className) { + String typeArg = getSinkType(className, functionPackage.getTypePool()).asErasure().getName(); SinkSpec.Builder sinkBuilder = SinkSpec.newBuilder(functionDetails.getSink()); sinkBuilder.setTypeClassName(typeArg); diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java index 8e6725e93af91..b6e2bbb1ca0f8 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java @@ -220,7 +220,8 @@ public FunctionRuntimeManager(WorkerConfig workerConfig, PulsarWorkerService wor functionAuthProvider, runtimeCustomizer); this.functionActioner = new FunctionActioner(this.workerConfig, runtimeFactory, - dlogNamespace, connectorsManager, functionsManager, 
workerService.getBrokerAdmin()); + dlogNamespace, connectorsManager, functionsManager, workerService.getBrokerAdmin(), + workerService.getPackageUrlValidator()); this.membershipManager = membershipManager; this.functionMetaDataManager = functionMetaDataManager; diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java index 7f035b5562f24..e7816f06aacc8 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java @@ -41,7 +41,7 @@ public class LeaderService implements AutoCloseable, ConsumerEventListener { private ConsumerImpl consumer; private final WorkerConfig workerConfig; private final PulsarClient pulsarClient; - private boolean isLeader = false; + private volatile boolean isLeader = false; static final String COORDINATION_TOPIC_SUBSCRIPTION = "participants"; @@ -172,7 +172,7 @@ public synchronized void becameInactive(Consumer consumer, int partitionId) { } } - public synchronized boolean isLeader() { + public boolean isLeader() { return isLeader; } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PackageUrlValidator.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PackageUrlValidator.java new file mode 100644 index 0000000000000..2a8fe8dddb153 --- /dev/null +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PackageUrlValidator.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.worker; + +import java.net.URI; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.apache.pulsar.functions.proto.Function; + +/** + * Validates package URLs for functions and connectors. + * Validates that the package URL is either a file in the connectors or functions directory + * when referencing connector or function files is enabled, or matches one of the additional url patterns. 
+ */ +public class PackageUrlValidator { + private final Path connectionsDirectory; + private final Path functionsDirectory; + private final List additionalConnectionsPatterns; + private final List additionalFunctionsPatterns; + + public PackageUrlValidator(WorkerConfig workerConfig) { + this.connectionsDirectory = resolveDirectory(workerConfig.getEnableReferencingConnectorDirectoryFiles(), + workerConfig.getConnectorsDirectory()); + this.functionsDirectory = resolveDirectory(workerConfig.getEnableReferencingFunctionsDirectoryFiles(), + workerConfig.getFunctionsDirectory()); + this.additionalConnectionsPatterns = + compilePatterns(workerConfig.getAdditionalEnabledConnectorUrlPatterns()); + this.additionalFunctionsPatterns = + compilePatterns(workerConfig.getAdditionalEnabledFunctionsUrlPatterns()); + } + + private static Path resolveDirectory(Boolean enabled, String directory) { + return enabled != null && enabled + ? Path.of(directory).normalize().toAbsolutePath() : null; + } + + private static List compilePatterns(List additionalPatterns) { + return additionalPatterns != null ? 
additionalPatterns.stream().map(Pattern::compile).collect( + Collectors.toList()) : Collections.emptyList(); + } + + boolean isValidFunctionsPackageUrl(URI functionPkgUrl) { + return doesMatch(functionPkgUrl, functionsDirectory, additionalFunctionsPatterns); + } + + boolean isValidConnectionsPackageUrl(URI functionPkgUrl) { + return doesMatch(functionPkgUrl, connectionsDirectory, additionalConnectionsPatterns); + } + + private boolean doesMatch(URI functionPkgUrl, Path directory, List patterns) { + if (directory != null && "file".equals(functionPkgUrl.getScheme())) { + Path filePath = Path.of(functionPkgUrl.getPath()).normalize().toAbsolutePath(); + if (filePath.startsWith(directory)) { + return true; + } + } + String functionPkgUrlString = functionPkgUrl.normalize().toString(); + for (Pattern pattern : patterns) { + if (pattern.matcher(functionPkgUrlString).matches()) { + return true; + } + } + return false; + } + + public boolean isValidPackageUrl(Function.FunctionDetails.ComponentType componentType, String functionPkgUrl) { + URI uri = URI.create(functionPkgUrl); + if (componentType == null) { + // if component type is not specified, we need to check both functions and connections + return isValidFunctionsPackageUrl(uri) || isValidConnectionsPackageUrl(uri); + } + switch (componentType) { + case FUNCTION: + return isValidFunctionsPackageUrl(uri); + case SINK: + case SOURCE: + return isValidConnectionsPackageUrl(uri); + default: + throw new IllegalArgumentException("Unknown component type: " + componentType); + } + } +} diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java index 8a9ea22c53015..84b943e5671ac 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java +++ 
b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java @@ -119,7 +119,8 @@ public interface PulsarClientCreator { private Sinks sinks; private Sources sources; private Workers workers; - + @Getter + private PackageUrlValidator packageUrlValidator; private final PulsarClientCreator clientCreator; public PulsarWorkerService() { @@ -196,6 +197,7 @@ public void init(WorkerConfig workerConfig, this.sinks = new SinksImpl(() -> PulsarWorkerService.this); this.sources = new SourcesImpl(() -> PulsarWorkerService.this); this.workers = new WorkerImpl(() -> PulsarWorkerService.this); + this.packageUrlValidator = new PackageUrlValidator(workerConfig); } @Override @@ -658,6 +660,14 @@ public void stop() { if (statsUpdater != null) { statsUpdater.shutdownNow(); } + + if (null != functionsManager) { + functionsManager.close(); + } + + if (null != connectorsManager) { + connectorsManager.close(); + } } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java index 585b54d846169..6d07e5870917a 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java @@ -100,6 +100,7 @@ import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.FunctionConfigUtils; import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; @@ -1375,11 +1376,17 @@ private StreamingOutput getStreamingOutput(String pkgPath) { private StreamingOutput 
getStreamingOutput(String pkgPath, FunctionDetails.ComponentType componentType) { return output -> { if (pkgPath.startsWith(Utils.HTTP)) { + if (!worker().getPackageUrlValidator().isValidPackageUrl(componentType, pkgPath)) { + throw new IllegalArgumentException("Invalid package url: " + pkgPath); + } URL url = URI.create(pkgPath).toURL(); try (InputStream inputStream = url.openStream()) { IOUtils.copy(inputStream, output); } } else if (pkgPath.startsWith(Utils.FILE)) { + if (!worker().getPackageUrlValidator().isValidPackageUrl(componentType, pkgPath)) { + throw new IllegalArgumentException("Invalid package url: " + pkgPath); + } URI url = URI.create(pkgPath); File file = new File(url.getPath()); Files.copy(file.toPath(), output); @@ -1788,12 +1795,6 @@ private void internalProcessFunctionRequest(final String tenant, final String na } } - protected ClassLoader getClassLoaderFromPackage(String className, - File packageFile, - String narExtractionDirectory) { - return FunctionCommon.getClassLoaderFromPackage(componentType, className, packageFile, narExtractionDirectory); - } - static File downloadPackageFile(PulsarWorkerService worker, String packageName) throws IOException, PulsarAdminException { Path tempDirectory; @@ -1809,12 +1810,17 @@ static File downloadPackageFile(PulsarWorkerService worker, String packageName) return file; } - protected File getPackageFile(String functionPkgUrl, String existingPackagePath, InputStream uploadedInputStream) + protected File getPackageFile(FunctionDetails.ComponentType componentType, String functionPkgUrl, + String existingPackagePath, InputStream uploadedInputStream) throws IOException, PulsarAdminException { File componentPackageFile = null; if (isNotBlank(functionPkgUrl)) { - componentPackageFile = getPackageFile(functionPkgUrl); + componentPackageFile = getPackageFile(componentType, functionPkgUrl); } else if (existingPackagePath.startsWith(Utils.FILE) || existingPackagePath.startsWith(Utils.HTTP)) { + if 
(!worker().getPackageUrlValidator().isValidPackageUrl(componentType, functionPkgUrl)) { + throw new IllegalArgumentException("Function Package url is not valid." + + "supported url (http/https/file)"); + } try { componentPackageFile = FunctionCommon.extractFileFromPkgURL(existingPackagePath); } catch (Exception e) { @@ -1822,6 +1828,8 @@ protected File getPackageFile(String functionPkgUrl, String existingPackagePath, + "when getting %s package from %s", e.getMessage(), ComponentTypeUtils.toString(componentType), functionPkgUrl)); } + } else if (Utils.hasPackageTypePrefix(existingPackagePath)) { + componentPackageFile = getPackageFile(componentType, existingPackagePath); } else if (uploadedInputStream != null) { componentPackageFile = WorkerUtils.dumpToTmpFile(uploadedInputStream); } else if (!existingPackagePath.startsWith(Utils.BUILTIN)) { @@ -1839,15 +1847,16 @@ protected File getPackageFile(String functionPkgUrl, String existingPackagePath, return componentPackageFile; } - protected File downloadPackageFile(String packageName) throws IOException, PulsarAdminException { - return downloadPackageFile(worker(), packageName); - } - - protected File getPackageFile(String functionPkgUrl) throws IOException, PulsarAdminException { + protected File getPackageFile(FunctionDetails.ComponentType componentType, String functionPkgUrl) + throws IOException, PulsarAdminException { if (Utils.hasPackageTypePrefix(functionPkgUrl)) { - return downloadPackageFile(functionPkgUrl); + if (!worker().getWorkerConfig().isFunctionsWorkerEnablePackageManagement()) { + throw new IllegalStateException("Function Package management service is disabled. " + + "Please enable it to use " + functionPkgUrl); + } + return downloadPackageFile(worker(), functionPkgUrl); } else { - if (!Utils.isFunctionPackageUrlSupported(functionPkgUrl)) { + if (!worker().getPackageUrlValidator().isValidPackageUrl(componentType, functionPkgUrl)) { throw new IllegalArgumentException("Function Package url is not valid." 
+ "supported url (http/https/file)"); } @@ -1861,7 +1870,7 @@ protected File getPackageFile(String functionPkgUrl) throws IOException, PulsarA } } - protected ClassLoader getBuiltinFunctionClassLoader(String archive) { + protected ValidatableFunctionPackage getBuiltinFunctionPackage(String archive) { if (!StringUtils.isEmpty(archive)) { if (archive.startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN)) { archive = archive.replaceFirst("^builtin://", ""); @@ -1871,7 +1880,7 @@ protected ClassLoader getBuiltinFunctionClassLoader(String archive) { if (function == null) { throw new IllegalArgumentException("Built-in " + componentType + " is not available"); } - return function.getClassLoader(); + return function.getFunctionPackage(); } } return null; diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java index 078c47524f8e9..a075d3e18a0b3 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java @@ -47,7 +47,6 @@ import org.apache.pulsar.common.functions.WorkerInfo; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.FunctionStatus; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; @@ -55,11 +54,14 @@ import org.apache.pulsar.functions.proto.InstanceCommunication; import org.apache.pulsar.functions.utils.ComponentTypeUtils; import org.apache.pulsar.functions.utils.FunctionConfigUtils; +import org.apache.pulsar.functions.utils.FunctionFilePackage; import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; 
+import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.functions.FunctionArchive; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.FunctionsManager; import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Functions; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @@ -142,7 +144,7 @@ public void registerFunction(final String tenant, // validate parameters try { if (isNotBlank(functionPkgUrl)) { - componentPackageFile = getPackageFile(functionPkgUrl); + componentPackageFile = getPackageFile(componentType, functionPkgUrl); functionDetails = validateUpdateRequestParams(tenant, namespace, functionName, functionConfig, componentPackageFile); } else { @@ -303,6 +305,7 @@ public void updateFunction(final String tenant, // validate parameters try { componentPackageFile = getPackageFile( + componentType, functionPkgUrl, existingComponent.getPackageLocation().getPackagePath(), uploadedInputStream); @@ -732,11 +735,12 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant functionConfig.setTenant(tenant); functionConfig.setNamespace(namespace); functionConfig.setName(componentName); + WorkerConfig workerConfig = worker().getWorkerConfig(); FunctionConfigUtils.inferMissingArguments( - functionConfig, worker().getWorkerConfig().isForwardSourceMessageProperty()); + functionConfig, workerConfig.isForwardSourceMessageProperty()); String archive = functionConfig.getJar(); - ClassLoader classLoader = null; + ValidatableFunctionPackage functionPackage = null; // check if function is builtin and extract classloader if (!StringUtils.isEmpty(archive)) { if (archive.startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN)) { @@ -749,35 +753,38 @@ 
private Function.FunctionDetails validateUpdateRequestParams(final String tenant if (function == null) { throw new IllegalArgumentException(String.format("No Function %s found", archive)); } - classLoader = function.getClassLoader(); + functionPackage = function.getFunctionPackage(); } } - boolean shouldCloseClassLoader = false; + boolean shouldCloseFunctionPackage = false; try { - if (functionConfig.getRuntime() == FunctionConfig.Runtime.JAVA) { // if function is not builtin, attempt to extract classloader from package file if it exists - if (classLoader == null && componentPackageFile != null) { - classLoader = getClassLoaderFromPackage(functionConfig.getClassName(), - componentPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - shouldCloseClassLoader = true; + if (functionPackage == null && componentPackageFile != null) { + functionPackage = + new FunctionFilePackage(componentPackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), FunctionDefinition.class); + shouldCloseFunctionPackage = true; } - if (classLoader == null) { + if (functionPackage == null) { throw new IllegalArgumentException("Function package is not provided"); } FunctionConfigUtils.ExtractedFunctionDetails functionDetails = FunctionConfigUtils.validateJavaFunction( - functionConfig, classLoader); + functionConfig, functionPackage); return FunctionConfigUtils.convert(functionConfig, functionDetails); } else { - classLoader = FunctionConfigUtils.validate(functionConfig, componentPackageFile); - shouldCloseClassLoader = true; - return FunctionConfigUtils.convert(functionConfig, classLoader); + FunctionConfigUtils.validateNonJavaFunction(functionConfig); + return FunctionConfigUtils.convert(functionConfig); } } finally { - if (shouldCloseClassLoader) { - ClassLoaderUtils.closeClassLoader(classLoader); + if (shouldCloseFunctionPackage && functionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) 
functionPackage).close(); + } catch (Exception e) { + log.error("Failed to close function file", e); + } } } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java index c382ec9e01b35..6b8b41e5a8e5b 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java @@ -47,18 +47,20 @@ import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.SinkStatus; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.InstanceCommunication; import org.apache.pulsar.functions.utils.ComponentTypeUtils; +import org.apache.pulsar.functions.utils.FunctionFilePackage; import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; import org.apache.pulsar.functions.utils.SinkConfigUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Sinks; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @@ -141,7 +143,7 @@ public void registerSink(final String tenant, // validate parameters try { if (isNotBlank(sinkPkgUrl)) { - componentPackageFile = 
getPackageFile(sinkPkgUrl); + componentPackageFile = getPackageFile(componentType, sinkPkgUrl); functionDetails = validateUpdateRequestParams(tenant, namespace, sinkName, sinkConfig, componentPackageFile); } else { @@ -308,6 +310,7 @@ public void updateSink(final String tenant, // validate parameters try { componentPackageFile = getPackageFile( + componentType, sinkPkgUrl, existingComponent.getPackageLocation().getPackagePath(), uploadedInputStream); @@ -419,7 +422,8 @@ private void setTransformFunctionPackageLocation(Function.FunctionMetaData.Build try { String builtin = functionDetails.getBuiltin(); if (isBlank(builtin)) { - functionPackageFile = getPackageFile(transformFunction); + functionPackageFile = + getPackageFile(Function.FunctionDetails.ComponentType.FUNCTION, transformFunction); } Function.PackageLocationMetaData.Builder functionPackageLocation = getFunctionPackageLocation(functionMetaDataBuilder.build(), @@ -704,7 +708,7 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant sinkConfig.setName(sinkName); org.apache.pulsar.common.functions.Utils.inferMissingArguments(sinkConfig); - ClassLoader classLoader = null; + ValidatableFunctionPackage connectorFunctionPackage = null; // check if sink is builtin and extract classloader if (!StringUtils.isEmpty(sinkConfig.getArchive())) { String archive = sinkConfig.getArchive(); @@ -716,45 +720,63 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant if (connector == null) { throw new IllegalArgumentException("Built-in sink is not available"); } - classLoader = connector.getClassLoader(); + connectorFunctionPackage = connector.getConnectorFunctionPackage(); } } - boolean shouldCloseClassLoader = false; + boolean shouldCloseFunctionPackage = false; + ValidatableFunctionPackage transformFunctionPackage = null; + boolean shouldCloseTransformFunctionPackage = false; try { // if sink is not builtin, attempt to extract classloader from package file if it exists 
- if (classLoader == null && sinkPackageFile != null) { - classLoader = getClassLoaderFromPackage(sinkConfig.getClassName(), - sinkPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - shouldCloseClassLoader = true; + WorkerConfig workerConfig = worker().getWorkerConfig(); + if (connectorFunctionPackage == null && sinkPackageFile != null) { + connectorFunctionPackage = + new FunctionFilePackage(sinkPackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), ConnectorDefinition.class); + shouldCloseFunctionPackage = true; } - if (classLoader == null) { + if (connectorFunctionPackage == null) { throw new IllegalArgumentException("Sink package is not provided"); } - ClassLoader functionClassLoader = null; if (isNotBlank(sinkConfig.getTransformFunction())) { - functionClassLoader = - getBuiltinFunctionClassLoader(sinkConfig.getTransformFunction()); - if (functionClassLoader == null) { - File functionPackageFile = getPackageFile(sinkConfig.getTransformFunction()); - functionClassLoader = getClassLoaderFromPackage(sinkConfig.getTransformFunctionClassName(), - functionPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); + transformFunctionPackage = + getBuiltinFunctionPackage(sinkConfig.getTransformFunction()); + if (transformFunctionPackage == null) { + File functionPackageFile = getPackageFile(Function.FunctionDetails.ComponentType.FUNCTION, + sinkConfig.getTransformFunction()); + transformFunctionPackage = + new FunctionFilePackage(functionPackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), ConnectorDefinition.class); + shouldCloseTransformFunctionPackage = true; } - if (functionClassLoader == null) { + if (transformFunctionPackage == null) { throw new IllegalArgumentException("Transform Function package not found"); } } - SinkConfigUtils.ExtractedSinkDetails sinkDetails = SinkConfigUtils.validateAndExtractDetails( - sinkConfig, 
classLoader, functionClassLoader, worker().getWorkerConfig().getValidateConnectorConfig()); + SinkConfigUtils.ExtractedSinkDetails sinkDetails = + SinkConfigUtils.validateAndExtractDetails(sinkConfig, connectorFunctionPackage, + transformFunctionPackage, workerConfig.getValidateConnectorConfig()); return SinkConfigUtils.convert(sinkConfig, sinkDetails); } finally { - if (shouldCloseClassLoader) { - ClassLoaderUtils.closeClassLoader(classLoader); + if (shouldCloseFunctionPackage && connectorFunctionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) connectorFunctionPackage).close(); + } catch (Exception e) { + log.error("Failed to connector function file", e); + } + } + if (shouldCloseTransformFunctionPackage && transformFunctionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) transformFunctionPackage).close(); + } catch (Exception e) { + log.error("Failed to close transform function file", e); + } } } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java index c55ddf48b06b9..5191306146951 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java @@ -46,18 +46,20 @@ import org.apache.pulsar.common.io.SourceConfig; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.SourceStatus; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.InstanceCommunication; import 
org.apache.pulsar.functions.utils.ComponentTypeUtils; +import org.apache.pulsar.functions.utils.FunctionFilePackage; import org.apache.pulsar.functions.utils.FunctionMetaDataUtils; import org.apache.pulsar.functions.utils.SourceConfigUtils; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; import org.apache.pulsar.functions.utils.io.Connector; import org.apache.pulsar.functions.worker.FunctionMetaDataManager; import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Sources; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @@ -141,7 +143,7 @@ public void registerSource(final String tenant, // validate parameters try { if (isPkgUrlProvided) { - componentPackageFile = getPackageFile(sourcePkgUrl); + componentPackageFile = getPackageFile(componentType, sourcePkgUrl); functionDetails = validateUpdateRequestParams(tenant, namespace, sourceName, sourceConfig, componentPackageFile); } else { @@ -302,6 +304,7 @@ public void updateSource(final String tenant, // validate parameters try { componentPackageFile = getPackageFile( + componentType, sourcePkgUrl, existingComponent.getPackageLocation().getPackagePath(), uploadedInputStream); @@ -663,7 +666,7 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant sourceConfig.setName(sourceName); org.apache.pulsar.common.functions.Utils.inferMissingArguments(sourceConfig); - ClassLoader classLoader = null; + ValidatableFunctionPackage connectorFunctionPackage = null; // check if source is builtin and extract classloader if (!StringUtils.isEmpty(sourceConfig.getArchive())) { String archive = sourceConfig.getArchive(); @@ -675,30 +678,37 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant if (connector == null) { throw new IllegalArgumentException("Built-in source 
is not available"); } - classLoader = connector.getClassLoader(); + connectorFunctionPackage = connector.getConnectorFunctionPackage(); } } - boolean shouldCloseClassLoader = false; + boolean shouldCloseFunctionPackage = false; try { // if source is not builtin, attempt to extract classloader from package file if it exists - if (classLoader == null && sourcePackageFile != null) { - classLoader = getClassLoaderFromPackage(sourceConfig.getClassName(), - sourcePackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - shouldCloseClassLoader = true; + WorkerConfig workerConfig = worker().getWorkerConfig(); + if (connectorFunctionPackage == null && sourcePackageFile != null) { + connectorFunctionPackage = + new FunctionFilePackage(sourcePackageFile, workerConfig.getNarExtractionDirectory(), + workerConfig.getEnableClassloadingOfExternalFiles(), ConnectorDefinition.class); + shouldCloseFunctionPackage = true; } - if (classLoader == null) { + if (connectorFunctionPackage == null) { throw new IllegalArgumentException("Source package is not provided"); } SourceConfigUtils.ExtractedSourceDetails sourceDetails = SourceConfigUtils.validateAndExtractDetails( - sourceConfig, classLoader, worker().getWorkerConfig().getValidateConnectorConfig()); + sourceConfig, connectorFunctionPackage, + workerConfig.getValidateConnectorConfig()); return SourceConfigUtils.convert(sourceConfig, sourceDetails); } finally { - if (shouldCloseClassLoader) { - ClassLoaderUtils.closeClassLoader(classLoader); + if (shouldCloseFunctionPackage && connectorFunctionPackage instanceof AutoCloseable) { + try { + ((AutoCloseable) connectorFunctionPackage).close(); + } catch (Exception e) { + log.error("Failed to connector function file", e); + } } } } diff --git a/pulsar-functions/worker/src/main/resources/findbugsExclude.xml b/pulsar-functions/worker/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- 
a/pulsar-functions/worker/src/main/resources/findbugsExclude.xml +++ b/pulsar-functions/worker/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java index 4e4c3d2f234aa..ac5ca617ea43b 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java @@ -30,6 +30,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; import static org.testng.AssertJUnit.fail; +import java.util.List; import java.util.Map; import java.util.Optional; import org.apache.distributedlog.api.namespace.Namespace; @@ -77,7 +78,8 @@ public void testStartFunctionWithDLNamespace() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class)); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class), + mock(PackageUrlValidator.class)); Function.FunctionMetaData function1 = Function.FunctionMetaData.newBuilder() .setFunctionDetails(Function.FunctionDetails.newBuilder().setTenant("test-tenant") .setNamespace("test-namespace").setName("func-1")) @@ -109,6 +111,8 @@ public void testStartFunctionWithPkgUrl() throws Exception { workerConfig.setPulsarServiceUrl("pulsar://localhost:6650"); workerConfig.setStateStorageServiceUrl("foo"); workerConfig.setFunctionAssignmentTopicName("assignments"); + workerConfig.setAdditionalEnabledFunctionsUrlPatterns(List.of("file:///user/.*", "http://invalid/.*")); + 
workerConfig.setAdditionalEnabledConnectorUrlPatterns(List.of("file:///user/.*", "http://invalid/.*")); String downloadDir = this.getClass().getProtectionDomain().getCodeSource().getLocation().getPath(); workerConfig.setDownloadDirectory(downloadDir); @@ -122,11 +126,12 @@ public void testStartFunctionWithPkgUrl() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class)); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class), + new PackageUrlValidator(workerConfig)); // (1) test with file url. functionActioner should be able to consider file-url and it should be able to call // RuntimeSpawner - String pkgPathLocation = FILE + ":/user/my-file.jar"; + String pkgPathLocation = FILE + ":///user/my-file.jar"; startFunction(actioner, pkgPathLocation, pkgPathLocation); verify(runtime, times(1)).start(); @@ -194,7 +199,8 @@ public void testFunctionAuthDisabled() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class)); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), mock(PulsarAdmin.class), + mock(PackageUrlValidator.class)); String pkgPathLocation = "http://invalid/my-file.jar"; @@ -257,7 +263,8 @@ public void testStartFunctionWithPackageUrl() throws Exception { @SuppressWarnings("resource") FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, - new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), pulsarAdmin); + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), pulsarAdmin, + mock(PackageUrlValidator.class)); // (1) test with file url. 
functionActioner should be able to consider file-url and it should be able to call // RuntimeSpawner diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java index bc56b1766d39d..7b57a1c896539 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java @@ -720,7 +720,7 @@ public void testExternallyManagedRuntimeUpdate() throws Exception { FunctionActioner functionActioner = spy(new FunctionActioner( workerConfig, - kubernetesRuntimeFactory, null, null, null, null)); + kubernetesRuntimeFactory, null, null, null, null, workerService.getPackageUrlValidator())); try (final MockedStatic runtimeFactoryMockedStatic = Mockito .mockStatic(RuntimeFactory.class);) { diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java index 8da24fd1b7250..5c10a59bd1388 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/LeaderServiceTest.java @@ -36,6 +36,7 @@ import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; @@ -76,7 +77,8 @@ public LeaderServiceTest() { @BeforeMethod public void setup() throws PulsarClientException { mockClient = mock(PulsarClientImpl.class); 
- + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); mockConsumer = mock(ConsumerImpl.class); ConsumerBuilder mockConsumerBuilder = mock(ConsumerBuilder.class); diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java index e066bb24e6ef0..ac3176b3135e2 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/MembershipManagerTest.java @@ -41,6 +41,7 @@ import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.ReaderBuilder; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.functions.WorkerInfo; @@ -68,7 +69,8 @@ public MembershipManagerTest() { private static PulsarClient mockPulsarClient() throws PulsarClientException { PulsarClientImpl mockClient = mock(PulsarClientImpl.class); - + ConnectionPool connectionPool = mock(ConnectionPool.class); + when(mockClient.getCnxPool()).thenReturn(connectionPool); ConsumerImpl mockConsumer = mock(ConsumerImpl.class); ConsumerBuilder mockConsumerBuilder = mock(ConsumerBuilder.class); diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java index cfe087c78406a..73a229893e5d2 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java +++ 
b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java @@ -381,6 +381,6 @@ public static FunctionConfig createDefaultFunctionConfig() { public static Function.FunctionDetails createDefaultFunctionDetails() { FunctionConfig functionConfig = createDefaultFunctionConfig(); - return FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + return FunctionConfigUtils.convert(functionConfig); } } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java index 32a104c576993..a8415919c119b 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java @@ -18,1125 +18,82 @@ */ package org.apache.pulsar.functions.worker.rest.api.v2; - -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.apache.pulsar.functions.utils.FunctionCommon.mergeJson; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; -import com.google.common.collect.Lists; import com.google.gson.Gson; -import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.util.JsonFormat; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.net.URL; -import java.nio.file.Files; -import 
java.nio.file.Paths; -import java.util.HashMap; -import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; -import org.apache.distributedlog.api.namespace.Namespace; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.core.config.Configurator; import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.PulsarAdmin; -import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; import org.apache.pulsar.common.functions.FunctionConfig; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.functions.UpdateOptionsImpl; import org.apache.pulsar.common.util.RestException; -import org.apache.pulsar.functions.api.Context; -import org.apache.pulsar.functions.api.Function; -import org.apache.pulsar.functions.instance.InstanceUtils; -import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.proto.Function.FunctionMetaData; -import org.apache.pulsar.functions.proto.Function.PackageLocationMetaData; -import org.apache.pulsar.functions.proto.Function.ProcessingGuarantees; -import org.apache.pulsar.functions.proto.Function.SinkSpec; -import org.apache.pulsar.functions.proto.Function.SourceSpec; -import org.apache.pulsar.functions.proto.Function.SubscriptionType; -import org.apache.pulsar.functions.runtime.RuntimeFactory; -import org.apache.pulsar.functions.source.TopicSchema; -import org.apache.pulsar.functions.utils.FunctionCommon; +import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.utils.FunctionConfigUtils; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import 
org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; -import org.apache.pulsar.functions.worker.WorkerConfig; -import org.apache.pulsar.functions.worker.WorkerUtils; -import org.apache.pulsar.functions.worker.rest.api.FunctionsImpl; import org.apache.pulsar.functions.worker.rest.api.FunctionsImplV2; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; +import org.apache.pulsar.functions.worker.rest.api.v3.AbstractFunctionApiResourceTest; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -/** - * Unit test of {@link FunctionsApiV2Resource}. - */ -public class FunctionApiV2ResourceTest { - - private static final class TestFunction implements Function { - - @Override - public String process(String input, Context context) { - return input; - } - } - - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; - private static final String function = "test-function"; - private static final String outputTopic = "test-output-topic"; - private static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; - private static final String className = TestFunction.class.getName(); - private SubscriptionType subscriptionType = SubscriptionType.FAILOVER; - private static final Map topicsToSerDeClassName = new HashMap<>(); - static { - topicsToSerDeClassName.put("persistent://public/default/test_src", TopicSchema.DEFAULT_SERDE); - } - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants mockedTenants; - private Namespaces 
mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; +public class FunctionApiV2ResourceTest extends AbstractFunctionApiResourceTest { private FunctionsImplV2 resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetadata; - private LeaderService mockedLeaderService; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - private static Map mockStaticContexts = new HashMap<>(); - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); - this.mockedTenantInfo = mock(TenantInfoImpl.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - this.mockedFunctions = mock(Functions.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedFunctionMetadata = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - 
when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetadata); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); - - FunctionsImpl functions = spy(new FunctionsImpl(() -> mockedWorkerService)); - - this.resource = spy(new FunctionsImplV2(functions)); - - } - - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); - } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); - } - - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = - mockStaticContexts.computeIfAbsent(classStatic.getName(), name 
-> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); - } - - private void mockWorkerUtils() { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - }); - } - - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); - } - - private void mockInstanceUtils() { - mockStatic(InstanceUtils.class, ctx -> { - ctx.when(() -> InstanceUtils.calculateSubjectType(any())) - .thenReturn(FunctionDetails.ComponentType.FUNCTION); - }); - } - - // - // Register Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testRegisterFunctionMissingTenant() { - try { - testRegisterFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testRegisterFunctionMissingNamespace() { - try { - testRegisterFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testRegisterFunctionMissingFunctionName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - 
null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package is not provided") - public void testRegisterFunctionMissingPackage() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "No input topic\\(s\\) specified for the function") - public void testRegisterFunctionMissingInputTopics() throws Exception { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function Package is not provided") - public void testRegisterFunctionMissingPackageDetails() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package does not have" - + " the 
correct format. Pulsar cannot determine if the package is a NAR package or JAR package. Function " - + "classname is not provided and attempts to load it as a NAR package produced the following error.*") - public void testRegisterFunctionMissingClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class UnknownClass must be in class path") - public void testRegisterFunctionWrongClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - "UnknownClass", - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function parallelism must be a positive number") - public void testRegisterFunctionWrongParallelism() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - -2, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, - expectedExceptionsMessageRegExp = "Output topic persistent://public/default/test_src is also being used as an input topic \\(topics must be one or the other\\)") - public void testRegisterFunctionSameInputOutput() { - try { - 
testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - topicsToSerDeClassName.keySet().iterator().next(), - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Output topic " + function + - "-output-topic/test:" + " is invalid") - public void testRegisterFunctionWrongOutputTopic() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - function + "-output-topic/test:", - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Encountered error .*. 
when getting Function package from .*") - public void testRegisterFunctionHttpUrl() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "http://localhost:1234/test"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testRegisterFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String functionPkgUrl) { - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if (topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) { - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - - mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenCallRealMethod(); - }); - - try { - resource.registerFunction( - tenant, - namespace, - function, - inputStream, - details, - functionPkgUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException 
e) { - throw new RuntimeException(e); - } - - } - - private void registerDefaultFunction() { - FunctionConfig functionConfig = createDefaultFunctionConfig(); - try { - resource.registerFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function already exists") - public void testRegisterExistedFunction() { - try { - Configurator.setRootLevel(Level.DEBUG); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testRegisterFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new IOException("upload failure")); - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testRegisterFunctionSuccess() throws Exception { - try { - try (MockedStatic mocked = Mockito.mockStatic(WorkerUtils.class)) { - mocked.when(() -> WorkerUtils.uploadToBookKeeper( - any(Namespace.class), - any(InputStream.class), - anyString())).thenAnswer((i) -> null); - mocked.when(() -> WorkerUtils.dumpToTmpFile(any())).thenAnswer(i -> 
- { - try { - File tmpFile = FunctionCommon.createPkgTempFile(); - tmpFile.deleteOnExit(); - Files.copy((InputStream) i.getArguments()[0], tmpFile.toPath(), REPLACE_EXISTING); - return tmpFile; - } catch (IOException e) { - throw new RuntimeException("Cannot create a temporary file", e); - } - - } - ); - WorkerUtils.uploadToBookKeeper(null, null, null); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace does not exist") - public void testRegisterFunctionNonExistingNamespace() { - try { - this.namespaceList.clear(); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant does not exist") - public void testRegisterFunctionNonexistantTenant() throws Exception { - try { - when(mockedTenants.getTenantInfo(any())).thenThrow(PulsarAdminException.NotFoundException.class); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") - public void testRegisterFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - registerDefaultFunction(); - } catch (RestException re) { - 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registration interrupted") - public void testRegisterFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - doThrow(new IllegalStateException("Function registration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // Update Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testUpdateFunctionMissingTenant() throws Exception { - try { - testUpdateFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Tenant is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testUpdateFunctionMissingNamespace() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Namespace is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is 
not provided") - public void testUpdateFunctionMissingFunctionName() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - namespace, - null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Function name is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingPackage() throws Exception { - try { - mockWorkerUtils(); - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingInputTopic() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingClassName() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Update 
contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test - public void testUpdateFunctionChangedParallelism() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism + 1, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + @Override + protected void doSetup() { + super.doSetup(); + this.resource = spy(new FunctionsImplV2(() -> mockedWorkerService)); } - @Test - public void testUpdateFunctionChangedInputs() throws Exception { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( + protected void registerFunction(String tenant, String namespace, String function, InputStream inputStream, + FormDataContentDisposition details, String functionPkgUrl, + FunctionConfig functionConfig) throws IOException { + resource.registerFunction( tenant, namespace, function, - null, - topicsToSerDeClassName, - mockedFormData, - "DifferentOutput", - outputSerdeClassName, - null, - parallelism, + inputStream, + details, + functionPkgUrl, + JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig)), null); } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Input Topics cannot be altered") - public void testUpdateFunctionChangedOutput() throws Exception { - try { - mockWorkerUtils(); - - Map someOtherInput = new HashMap<>(); - someOtherInput.put("DifferentTopic", TopicSchema.DEFAULT_SERDE); - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - someOtherInput, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Input Topics cannot be altered"); - } catch (RestException re) { - 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testUpdateFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String expectedError) throws Exception { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if (topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) { - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - - if (expectedError != null) { - doThrow(new IllegalArgumentException(expectedError)) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - } - - try { - resource.updateFunction( - tenant, - namespace, - function, - inputStream, - details, - null, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - - } - - private void updateDefaultFunction() { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - 
functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - - try { - resource.updateFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testUpdateNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testUpdateFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new IOException("upload failure")); - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testUpdateFunctionSuccess() throws Exception { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - 
} - - @Test - public void testUpdateFunctionWithUrl() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - try { - resource.updateFunction( - tenant, - namespace, - function, - null, - null, - filePackageUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") - public void testUpdateFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, 
expectedExceptionsMessageRegExp = "Function registeration interrupted") - public void testUpdateFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function registeration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // deregister function - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testDeregisterFunctionMissingTenant() { - try { - - testDeregisterFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + protected void updateFunction(String tenant, + String namespace, + String functionName, + InputStream uploadedInputStream, + FormDataContentDisposition fileDetail, + String functionPkgUrl, + FunctionConfig functionConfig, + AuthenticationParameters authParams, + UpdateOptionsImpl updateOptions) throws IOException { + resource.updateFunction(tenant, namespace, functionName, uploadedInputStream, fileDetail, functionPkgUrl, + JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig)), authParams); } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testDeregisterFunctionMissingNamespace() { - try { - testDeregisterFunctionMissingArguments( - tenant, - null, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; + protected File downloadFunction(final String path, final AuthenticationParameters 
authParams) + throws IOException { + Response response = resource.downloadFunction(path, authParams); + StreamingOutput streamingOutput = readEntity(response, StreamingOutput.class); + File pkgFile = File.createTempFile("testpkg", "nar"); + try (OutputStream output = new FileOutputStream(pkgFile)) { + streamingOutput.write(output); } + return pkgFile; } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testDeregisterFunctionMissingFunctionName() { - try { - testDeregisterFunctionMissingArguments( - tenant, - namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + private T readEntity(Response response, Class clazz) { + return clazz.cast(response.getEntity()); } - private void testDeregisterFunctionMissingArguments( + protected void testDeregisterFunctionMissingArguments( String tenant, String namespace, String function @@ -1145,112 +102,18 @@ private void testDeregisterFunctionMissingArguments( tenant, namespace, function, - AuthenticationParameters.builder().build()); + null); } - private void deregisterDefaultFunction() { + protected void deregisterDefaultFunction() { resource.deregisterFunction( tenant, namespace, function, - AuthenticationParameters.builder().build()); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testDeregisterNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); - throw re; - } - } - - @Test - public void testDeregisterFunctionSuccess() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - 
deregisterDefaultFunction(); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to deregister") - public void testDeregisterFunctionFailure() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to deregister")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function deregisteration interrupted") - public void testDeregisterFunctionInterrupted() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function deregisteration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // Get Function Info - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testGetFunctionMissingTenant() throws IOException { - try { - testGetFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testGetFunctionMissingNamespace() throws IOException { - try { - testGetFunctionMissingArguments( - tenant, - null, - function - ); - } catch 
(RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testGetFunctionMissingFunctionName() throws IOException { - try { - testGetFunctionMissingArguments( - tenant, - namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + null); } - private void testGetFunctionMissingArguments( + protected void testGetFunctionMissingArguments( String tenant, String namespace, String function @@ -1258,20 +121,36 @@ private void testGetFunctionMissingArguments( resource.getFunctionInfo( tenant, namespace, - function, - AuthenticationParameters.builder().build() + function, null + ); + } + + protected void testListFunctionsMissingArguments( + String tenant, + String namespace + ) { + resource.listFunctions( + tenant, + namespace, null ); } - private FunctionDetails getDefaultFunctionInfo() throws IOException { + protected List listDefaultFunctions() { + return new Gson().fromJson(readEntity(resource.listFunctions( + tenant, + namespace, null + ), String.class), List.class); + } + + private Function.FunctionDetails getDefaultFunctionInfo() throws IOException { String json = (String) resource.getFunctionInfo( tenant, namespace, function, AuthenticationParameters.builder().build() ).getEntity(); - FunctionDetails.Builder functionDetailsBuilder = FunctionDetails.newBuilder(); + Function.FunctionDetails.Builder functionDetailsBuilder = Function.FunctionDetails.newBuilder(); mergeJson(json, functionDetailsBuilder); return functionDetailsBuilder.build(); } @@ -1292,218 +171,31 @@ public void testGetFunctionSuccess() throws IOException { mockInstanceUtils(); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - SinkSpec sinkSpec = SinkSpec.newBuilder() + 
Function.SinkSpec sinkSpec = Function.SinkSpec.newBuilder() .setTopic(outputTopic) .setSerDeClassName(outputSerdeClassName).build(); - FunctionDetails functionDetails = FunctionDetails.newBuilder() + Function.FunctionDetails functionDetails = Function.FunctionDetails.newBuilder() .setClassName(className) .setSink(sinkSpec) .setName(function) .setNamespace(namespace) - .setProcessingGuarantees(ProcessingGuarantees.ATMOST_ONCE) + .setProcessingGuarantees(Function.ProcessingGuarantees.ATMOST_ONCE) .setAutoAck(true) .setTenant(tenant) .setParallelism(parallelism) - .setSource(SourceSpec.newBuilder().setSubscriptionType(subscriptionType) + .setSource(Function.SourceSpec.newBuilder().setSubscriptionType(subscriptionType) .putAllTopicsToSerDeClassName(topicsToSerDeClassName)).build(); - FunctionMetaData metaData = FunctionMetaData.newBuilder() + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() .setCreateTime(System.currentTimeMillis()) .setFunctionDetails(functionDetails) - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) .setVersion(1234) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - FunctionDetails actual = getDefaultFunctionInfo(); + Function.FunctionDetails actual = getDefaultFunctionInfo(); assertEquals( functionDetails, actual); } - - // - // List Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testListFunctionsMissingTenant() { - try { - testListFunctionsMissingArguments( - null, - namespace - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not 
provided") - public void testListFunctionsMissingNamespace() { - try { - testListFunctionsMissingArguments( - tenant, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testListFunctionsMissingArguments( - String tenant, - String namespace - ) { - resource.listFunctions( - tenant, - namespace, - AuthenticationParameters.builder().build() - ); - - } - - private List listDefaultFunctions() { - return new Gson().fromJson((String) resource.listFunctions( - tenant, - namespace, - AuthenticationParameters.builder().build() - ).getEntity(), List.class); - } - - @Test - public void testListFunctionsSuccess() { - mockInstanceUtils(); - final List functions = Lists.newArrayList("test-1", "test-2"); - final List metaDataList = new LinkedList<>(); - FunctionMetaData functionMetaData1 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-1").build() - ).build(); - FunctionMetaData functionMetaData2 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-2").build() - ).build(); - metaDataList.add(functionMetaData1); - metaDataList.add(functionMetaData2); - when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(metaDataList); - - List functionList = listDefaultFunctions(); - assertEquals(functions, functionList); - } - - @Test - public void testDownloadFunctionHttpUrl() throws Exception { - String jarHttpUrl = - "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; - String testDir = FunctionApiV2ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - FunctionsImplV2 function = new FunctionsImplV2(() -> mockedWorkerService); - StreamingOutput streamOutput = (StreamingOutput) function.downloadFunction(jarHttpUrl, - AuthenticationParameters.builder().build()).getEntity(); - File pkgFile = new File(testDir, 
UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } - } - - @Test - public void testDownloadFunctionFile() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String testDir = FunctionApiV2ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - FunctionsImplV2 function = new FunctionsImplV2(() -> mockedWorkerService); - StreamingOutput streamOutput = (StreamingOutput) function.downloadFunction("file:///" + fileLocation, - AuthenticationParameters.builder().build()).getEntity(); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - Assert.assertTrue(pkgFile.exists()); - if (pkgFile.exists()) { - pkgFile.delete(); - } - } - - @Test - public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - 
functionConfig.setOutputSerdeClassName(outputSerdeClassName); - try { - resource.registerFunction(tenant, namespace, function, null, null, filePackageUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } - - } - - @Test - public void testRegisterFunctionWithConflictingFields() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - String actualTenant = "DIFFERENT_TENANT"; - String actualNamespace = "DIFFERENT_NAMESPACE"; - String actualName = "DIFFERENT_NAME"; - this.namespaceList.add(actualTenant + "/" + actualNamespace); - - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - try { - resource.registerFunction(actualTenant, actualNamespace, actualName, null, null, filePackageUrl, - JsonFormat.printer().print(FunctionConfigUtils.convert(functionConfig, (ClassLoader) null)), - AuthenticationParameters.builder().build()); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); - } 
- } - - public static FunctionConfig createDefaultFunctionConfig() { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - return functionConfig; - } - - public static FunctionDetails createDefaultFunctionDetails() { - FunctionConfig functionConfig = createDefaultFunctionConfig(); - return FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); - } } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionApiResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionApiResourceTest.java new file mode 100644 index 0000000000000..388331ce6f241 --- /dev/null +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionApiResourceTest.java @@ -0,0 +1,1374 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.functions.worker.rest.api.v3; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import com.google.common.collect.Lists; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.Method; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import javax.ws.rs.core.Response; +import org.apache.distributedlog.api.namespace.Namespace; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.config.Configurator; +import org.apache.pulsar.broker.authentication.AuthenticationParameters; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.UpdateOptionsImpl; +import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.RestException; +import org.apache.pulsar.functions.api.Context; +import org.apache.pulsar.functions.api.Function; +import org.apache.pulsar.functions.proto.Function.FunctionDetails; +import org.apache.pulsar.functions.proto.Function.FunctionMetaData; +import org.apache.pulsar.functions.proto.Function.SubscriptionType; +import org.apache.pulsar.functions.source.TopicSchema; +import org.apache.pulsar.functions.utils.FunctionConfigUtils; +import org.apache.pulsar.functions.worker.WorkerConfig; +import org.apache.pulsar.functions.worker.WorkerUtils; +import org.glassfish.jersey.media.multipart.FormDataContentDisposition; +import org.mockito.Mockito; +import org.testng.Assert; +import 
org.testng.annotations.Test; + +public abstract class AbstractFunctionApiResourceTest extends AbstractFunctionsResourceTest { + @Override + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + if (method.getName().contains("Upload")) { + workerConfig.setFunctionsWorkerEnablePackageManagement(false); + } + } + + @Test + public void testListFunctionsSuccess() { + mockInstanceUtils(); + final List functions = Lists.newArrayList("test-1", "test-2"); + final List metaDataList = new LinkedList<>(); + FunctionMetaData functionMetaData1 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder().setName("test-1").build() + ).build(); + FunctionMetaData functionMetaData2 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder().setName("test-2").build() + ).build(); + metaDataList.add(functionMetaData1); + metaDataList.add(functionMetaData2); + when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(metaDataList); + + List functionList = listDefaultFunctions(); + assertEquals(functions, functionList); + } + + @Test + public void testOnlyGetSources() { + List functions = Lists.newArrayList("test-2"); + List functionMetaDataList = new LinkedList<>(); + FunctionMetaData f1 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder() + .setName("test-1") + .setComponentType(FunctionDetails.ComponentType.SOURCE) + .build()).build(); + functionMetaDataList.add(f1); + FunctionMetaData f2 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder() + .setName("test-2") + .setComponentType(FunctionDetails.ComponentType.FUNCTION) + .build()).build(); + functionMetaDataList.add(f2); + FunctionMetaData f3 = FunctionMetaData.newBuilder().setFunctionDetails( + FunctionDetails.newBuilder() + .setName("test-3") + .setComponentType(FunctionDetails.ComponentType.SINK) + .build()).build(); + functionMetaDataList.add(f3); + 
when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(functionMetaDataList); + + List functionList = listDefaultFunctions(); + assertEquals(functions, functionList); + } + + private static final class TestFunction implements Function { + + @Override + public String process(String input, Context context) { + return input; + } + } + + private static final class WrongFunction implements Consumer { + @Override + public void accept(String s) { + + } + } + + protected static final String function = "test-function"; + protected static final String outputTopic = "test-output-topic"; + protected static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; + protected static final String className = TestFunction.class.getName(); + protected SubscriptionType subscriptionType = SubscriptionType.FAILOVER; + protected FunctionMetaData mockedFunctionMetadata; + + + @Override + protected void doSetup() { + this.mockedFunctionMetadata = + FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); + when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetadata); + } + + @Override + protected FunctionDetails.ComponentType getComponentType() { + return FunctionDetails.ComponentType.FUNCTION; + } + + + abstract protected void registerFunction(String tenant, String namespace, String function, InputStream inputStream, + FormDataContentDisposition details, String functionPkgUrl, FunctionConfig functionConfig) + throws IOException; + abstract protected void updateFunction(String tenant, + String namespace, + String functionName, + InputStream uploadedInputStream, + FormDataContentDisposition fileDetail, + String functionPkgUrl, + FunctionConfig functionConfig, + AuthenticationParameters authParams, + UpdateOptionsImpl updateOptions) throws IOException; + + abstract protected File downloadFunction(final String path, final AuthenticationParameters authParams) + throws IOException; + + abstract 
protected void testDeregisterFunctionMissingArguments( + String tenant, + String namespace, + String function + ); + + abstract protected void deregisterDefaultFunction(); + + abstract protected void testGetFunctionMissingArguments( + String tenant, + String namespace, + String function + ) throws IOException; + + abstract protected void testListFunctionsMissingArguments( + String tenant, + String namespace + ); + + abstract protected List listDefaultFunctions(); + + // + // Register Functions + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testRegisterFunctionMissingTenant() throws IOException { + try { + testRegisterFunctionMissingArguments( + null, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testRegisterFunctionMissingNamespace() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + null, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testRegisterFunctionMissingFunctionName() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + null, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } 
catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package is not " + + "provided") + public void testRegisterFunctionMissingPackage() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "No input topic\\(s\\) " + + "specified for the function") + public void testRegisterFunctionMissingInputTopics() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + null, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function Package is not " + + "provided") + public void testRegisterFunctionMissingPackageDetails() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + null, + outputTopic, + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, + expectedExceptionsMessageRegExp = "Function class name is not provided.") + public void testRegisterFunctionMissingClassName() throws IOException { + try { + 
testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class UnknownClass " + + "must be in class path") + public void testRegisterFunctionWrongClassName() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + "UnknownClass", + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function parallelism must be a" + + " positive number") + public void testRegisterFunctionWrongParallelism() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + -2, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, + expectedExceptionsMessageRegExp = "Output topic persistent://public/default/test_src is also being used " + + "as an input topic \\(topics must be one or the other\\)") + public void testRegisterFunctionSameInputOutput() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + topicsToSerDeClassName.keySet().iterator().next(), + outputSerdeClassName, 
+ className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Output topic " + function + + "-output-topic/test:" + " is invalid") + public void testRegisterFunctionWrongOutputTopic() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + function + "-output-topic/test:", + outputSerdeClassName, + className, + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Encountered error .*. when " + + "getting Function package from .*") + public void testRegisterFunctionHttpUrl() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + null, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "http://localhost:1234/test"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class .*. 
does not " + + "implement the correct interface") + public void testRegisterFunctionImplementWrongInterface() throws IOException { + try { + testRegisterFunctionMissingArguments( + tenant, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + WrongFunction.class.getName(), + parallelism, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + private void testRegisterFunctionMissingArguments( + String tenant, + String namespace, + String function, + InputStream inputStream, + Map topicsToSerDeClassName, + FormDataContentDisposition details, + String outputTopic, + String outputSerdeClassName, + String className, + Integer parallelism, + String functionPkgUrl) throws IOException { + FunctionConfig functionConfig = new FunctionConfig(); + if (tenant != null) { + functionConfig.setTenant(tenant); + } + if (namespace != null) { + functionConfig.setNamespace(namespace); + } + if (function != null) { + functionConfig.setName(function); + } + if (topicsToSerDeClassName != null) { + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + } + if (outputTopic != null) { + functionConfig.setOutput(outputTopic); + } + if (outputSerdeClassName != null) { + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + } + if (className != null) { + functionConfig.setClassName(className); + } + if (parallelism != null) { + functionConfig.setParallelism(parallelism); + } + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + + registerFunction(tenant, namespace, function, inputStream, details, functionPkgUrl, functionConfig); + + } + + @Test(expectedExceptions = Exception.class, expectedExceptionsMessageRegExp = "Function config is not provided") + public void testUpdateMissingFunctionConfig() throws IOException { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), 
eq(function))).thenReturn(true); + + updateFunction( + tenant, + namespace, + function, + mockedInputStream, + mockedFormData, + null, + null, + null, null); + } + + + private void registerDefaultFunction() throws IOException { + registerDefaultFunctionWithPackageUrl(null); + } + + private void registerDefaultFunctionWithPackageUrl(String packageUrl) throws IOException { + FunctionConfig functionConfig = createDefaultFunctionConfig(); + registerFunction(tenant, namespace, function, mockedInputStream, mockedFormData, packageUrl, functionConfig); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function already" + + " exists") + public void testRegisterExistedFunction() throws IOException { + try { + Configurator.setRootLevel(Level.DEBUG); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") + public void testRegisterFunctionUploadFailure() throws IOException { + try { + mockWorkerUtils(ctx -> { + ctx.when(() -> { + WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class)); + } + ).thenThrow(new IOException("upload failure")); + }); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + @Test + public void testRegisterFunctionSuccess() throws IOException { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + registerDefaultFunction(); + } catch (RestException re) { + 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(timeOut = 20000) + public void testRegisterFunctionSuccessWithPackageName() throws IOException { + registerDefaultFunctionWithPackageUrl("function://public/default/test@v1"); + } + + @Test(timeOut = 20000) + public void testRegisterFunctionFailedWithWrongPackageName() throws PulsarAdminException, IOException { + try { + doThrow(new PulsarAdminException("package name is invalid")) + .when(mockedPackages).download(anyString(), anyString()); + registerDefaultFunctionWithPackageUrl("function://"); + } catch (RestException e) { + // expected exception + assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace does not exist") + public void testRegisterFunctionNonExistingNamespace() throws IOException { + try { + this.namespaceList.clear(); + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant does not exist") + public void testRegisterFunctionNonexistantTenant() throws Exception { + try { + when(mockedTenants.getTenantInfo(any())).thenThrow(PulsarAdminException.NotFoundException.class); + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") + public void testRegisterFunctionFailure() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + doThrow(new IllegalArgumentException("function failed to register")) + 
.when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registration " + + "interrupted") + public void testRegisterFunctionInterrupted() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + doThrow(new IllegalStateException("Function registration interrupted")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + registerDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + // + // Update Functions + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testUpdateFunctionMissingTenant() throws Exception { + try { + testUpdateFunctionMissingArguments( + null, + namespace, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Tenant is not provided"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testUpdateFunctionMissingNamespace() throws Exception { + try { + testUpdateFunctionMissingArguments( + tenant, + null, + function, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Namespace is not provided"); + } catch (RestException re) { + 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testUpdateFunctionMissingFunctionName() throws Exception { + try { + testUpdateFunctionMissingArguments( + tenant, + namespace, + null, + mockedInputStream, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Function name is not provided"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") + public void testUpdateFunctionMissingPackage() throws Exception { + try { + mockWorkerUtils(); + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Update contains no change"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") + public void testUpdateFunctionMissingInputTopic() throws Exception { + try { + mockWorkerUtils(); + + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + null, + mockedFormData, + outputTopic, + outputSerdeClassName, + className, + parallelism, + "Update contains no change"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") + public void testUpdateFunctionMissingClassName() throws Exception { + try { + mockWorkerUtils(); + 
+ testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism, + "Update contains no change"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test + public void testUpdateFunctionChangedParallelism() throws Exception { + try { + mockWorkerUtils(); + + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism + 1, + null); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test + public void testUpdateFunctionChangedInputs() throws Exception { + mockWorkerUtils(); + + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + topicsToSerDeClassName, + mockedFormData, + "DifferentOutput", + outputSerdeClassName, + null, + parallelism, + null); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Input Topics cannot be altered") + public void testUpdateFunctionChangedOutput() throws Exception { + try { + mockWorkerUtils(); + + Map someOtherInput = new HashMap<>(); + someOtherInput.put("DifferentTopic", TopicSchema.DEFAULT_SERDE); + testUpdateFunctionMissingArguments( + tenant, + namespace, + function, + null, + someOtherInput, + mockedFormData, + outputTopic, + outputSerdeClassName, + null, + parallelism, + "Input Topics cannot be altered"); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + private void testUpdateFunctionMissingArguments( + String tenant, + String namespace, + String function, + InputStream inputStream, + Map topicsToSerDeClassName, + FormDataContentDisposition details, + String outputTopic, + 
String outputSerdeClassName, + String className, + Integer parallelism, + String expectedError) throws Exception { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + FunctionConfig functionConfig = new FunctionConfig(); + if (tenant != null) { + functionConfig.setTenant(tenant); + } + if (namespace != null) { + functionConfig.setNamespace(namespace); + } + if (function != null) { + functionConfig.setName(function); + } + if (topicsToSerDeClassName != null) { + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + } + if (outputTopic != null) { + functionConfig.setOutput(outputTopic); + } + if (outputSerdeClassName != null) { + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + } + if (className != null) { + functionConfig.setClassName(className); + } + if (parallelism != null) { + functionConfig.setParallelism(parallelism); + } + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + + if (expectedError != null) { + doThrow(new IllegalArgumentException(expectedError)) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + } + + updateFunction( + tenant, + namespace, + function, + inputStream, + details, + null, + functionConfig, + null, null); + + } + + private void updateDefaultFunction() throws IOException { + updateDefaultFunctionWithPackageUrl(null); + } + + private void updateDefaultFunctionWithPackageUrl(String packageUrl) throws IOException { + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + + updateFunction( + 
tenant, + namespace, + function, + mockedInputStream, + mockedFormData, + packageUrl, + functionConfig, + null, null); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't" + + " exist") + public void testUpdateNotExistedFunction() throws IOException { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") + public void testUpdateFunctionUploadFailure() throws Exception { + try { + mockWorkerUtils(ctx -> { + ctx.when(() -> { + WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class)); + + }).thenThrow(new IOException("upload failure")); + }); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + @Test + public void testUpdateFunctionSuccess() throws Exception { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + updateDefaultFunction(); + } + + @Test + public void testUpdateFunctionWithUrl() throws IOException { + Configurator.setRootLevel(Level.DEBUG); + + String fileLocation = FutureUtil.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + String filePackageUrl = "file://" + fileLocation; + + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + 
functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + updateFunction( + tenant, + namespace, + function, + null, + null, + filePackageUrl, + functionConfig, + null, null); + + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") + public void testUpdateFunctionFailure() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + doThrow(new IllegalArgumentException("function failed to register")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registeration " + + "interrupted") + public void testUpdateFunctionInterrupted() throws Exception { + try { + mockWorkerUtils(); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + doThrow(new IllegalStateException("Function registeration interrupted")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + updateDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + + @Test(timeOut = 20000) + public void testUpdateFunctionSuccessWithPackageName() throws IOException { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + 
updateDefaultFunctionWithPackageUrl("function://public/default/test@v1"); + } + + @Test(timeOut = 20000) + public void testUpdateFunctionFailedWithWrongPackageName() throws PulsarAdminException, IOException { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + try { + doThrow(new PulsarAdminException("package name is invalid")) + .when(mockedPackages).download(anyString(), anyString()); + registerDefaultFunctionWithPackageUrl("function://"); + } catch (RestException e) { + // expected exception + assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + } + } + + // + // deregister function + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testDeregisterFunctionMissingTenant() { + try { + + testDeregisterFunctionMissingArguments( + null, + namespace, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testDeregisterFunctionMissingNamespace() { + try { + testDeregisterFunctionMissingArguments( + tenant, + null, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testDeregisterFunctionMissingFunctionName() { + try { + testDeregisterFunctionMissingArguments( + tenant, + namespace, + null + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't" + + " exist") + public void 
testDeregisterNotExistedFunction() { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + deregisterDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); + throw re; + } + } + + @Test + public void testDeregisterFunctionSuccess() { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + deregisterDefaultFunction(); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to deregister") + public void testDeregisterFunctionFailure() throws Exception { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + doThrow(new IllegalArgumentException("function failed to deregister")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + deregisterDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function deregisteration " + + "interrupted") + public void testDeregisterFunctionInterrupted() throws Exception { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + doThrow(new IllegalStateException("Function deregisteration interrupted")) + .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); + + deregisterDefaultFunction(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); + throw re; + } + } + + // + // Get Function Info + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testGetFunctionMissingTenant() throws IOException { + try 
{ + testGetFunctionMissingArguments( + null, + namespace, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testGetFunctionMissingNamespace() throws IOException { + try { + testGetFunctionMissingArguments( + tenant, + null, + function + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") + public void testGetFunctionMissingFunctionName() throws IOException { + try { + testGetFunctionMissingArguments( + tenant, + namespace, + null + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + // + // List Functions + // + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") + public void testListFunctionsMissingTenant() { + try { + testListFunctionsMissingArguments( + null, + namespace + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") + public void testListFunctionsMissingNamespace() { + try { + testListFunctionsMissingArguments( + tenant, + null + ); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); + throw re; + } + } + + @Test + public void testDownloadFunctionHttpUrl() throws Exception { + String jarHttpUrl = + "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; + File pkgFile = downloadFunction(jarHttpUrl, null); + 
pkgFile.delete(); + } + + @Test + public void testDownloadFunctionFile() throws Exception { + File file = getPulsarApiExamplesNar(); + File pkgFile = downloadFunction(file.toURI().toString(), null); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } + + @Test + public void testDownloadFunctionBuiltinConnector() throws Exception { + File file = getPulsarApiExamplesNar(); + + WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); + when(mockedWorkerService.getWorkerConfig()).thenReturn(config); + + registerBuiltinConnector("cassandra", file); + + File pkgFile = downloadFunction("builtin://cassandra", null); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } + + @Test + public void testDownloadFunctionBuiltinFunction() throws Exception { + File file = getPulsarApiExamplesNar(); + + WorkerConfig config = new WorkerConfig() + .setUploadBuiltinSinksSources(false); + when(mockedWorkerService.getWorkerConfig()).thenReturn(config); + + registerBuiltinFunction("exclamation", file); + + File pkgFile = downloadFunction("builtin://exclamation", null); + Assert.assertTrue(pkgFile.exists()); + Assert.assertEquals(file.length(), pkgFile.length()); + pkgFile.delete(); + } + + @Test + public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { + Configurator.setRootLevel(Level.DEBUG); + + File file = getPulsarApiExamplesNar(); + String filePackageUrl = file.toURI().toString(); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + 
functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig); + + } + + @Test + public void testRegisterFunctionWithConflictingFields() throws Exception { + Configurator.setRootLevel(Level.DEBUG); + String actualTenant = "DIFFERENT_TENANT"; + String actualNamespace = "DIFFERENT_NAMESPACE"; + String actualName = "DIFFERENT_NAME"; + this.namespaceList.add(actualTenant + "/" + actualNamespace); + + File file = getPulsarApiExamplesNar(); + String filePackageUrl = file.toURI().toString(); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); + + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + registerFunction(actualTenant, actualNamespace, actualName, null, null, filePackageUrl, functionConfig); + } + + public static FunctionConfig createDefaultFunctionConfig() { + FunctionConfig functionConfig = new FunctionConfig(); + functionConfig.setTenant(tenant); + functionConfig.setNamespace(namespace); + functionConfig.setName(function); + functionConfig.setClassName(className); + functionConfig.setParallelism(parallelism); + functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); + functionConfig.setOutput(outputTopic); + functionConfig.setOutputSerdeClassName(outputSerdeClassName); + 
functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); + return functionConfig; + } + + public static FunctionDetails createDefaultFunctionDetails() { + FunctionConfig functionConfig = createDefaultFunctionConfig(); + return FunctionConfigUtils.convert(functionConfig); + } +} diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionsResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionsResourceTest.java new file mode 100644 index 0000000000000..51ca4c83f9e02 --- /dev/null +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/AbstractFunctionsResourceTest.java @@ -0,0 +1,337 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.worker.rest.api.v3; + +import static org.apache.pulsar.functions.source.TopicSchema.DEFAULT_SERDE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.lang.reflect.Method; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Consumer; +import org.apache.distributedlog.api.namespace.Namespace; +import org.apache.pulsar.client.admin.Functions; +import org.apache.pulsar.client.admin.Namespaces; +import org.apache.pulsar.client.admin.Packages; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.Tenants; +import org.apache.pulsar.common.functions.FunctionDefinition; +import org.apache.pulsar.common.io.ConnectorDefinition; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.functions.instance.InstanceUtils; +import org.apache.pulsar.functions.proto.Function; +import org.apache.pulsar.functions.runtime.RuntimeFactory; +import org.apache.pulsar.functions.source.TopicSchema; +import org.apache.pulsar.functions.utils.LoadedFunctionPackage; +import org.apache.pulsar.functions.utils.ValidatableFunctionPackage; +import org.apache.pulsar.functions.utils.functions.FunctionArchive; +import org.apache.pulsar.functions.utils.functions.FunctionUtils; +import org.apache.pulsar.functions.utils.io.Connector; +import org.apache.pulsar.functions.utils.io.ConnectorUtils; +import 
org.apache.pulsar.functions.worker.ConnectorsManager; +import org.apache.pulsar.functions.worker.FunctionMetaDataManager; +import org.apache.pulsar.functions.worker.FunctionRuntimeManager; +import org.apache.pulsar.functions.worker.FunctionsManager; +import org.apache.pulsar.functions.worker.LeaderService; +import org.apache.pulsar.functions.worker.PackageUrlValidator; +import org.apache.pulsar.functions.worker.PulsarWorkerService; +import org.apache.pulsar.functions.worker.WorkerConfig; +import org.apache.pulsar.functions.worker.WorkerUtils; +import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; +import org.glassfish.jersey.media.multipart.FormDataContentDisposition; +import org.mockito.Answers; +import org.mockito.MockSettings; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; + +public abstract class AbstractFunctionsResourceTest { + + protected static final String tenant = "test-tenant"; + protected static final String namespace = "test-namespace"; + protected static final Map topicsToSerDeClassName = new HashMap<>(); + protected static final String subscriptionName = "test-subscription"; + protected static final String CASSANDRA_STRING_SINK = "org.apache.pulsar.io.cassandra.CassandraStringSink"; + protected static final int parallelism = 1; + private static final String SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH = "pulsar-io-cassandra.nar.path"; + private static final String SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH = "pulsar-io-twitter.nar.path"; + private static final String SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH = "pulsar-io-invalid.nar.path"; + private static final String SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH = + "pulsar-functions-api-examples.nar.path"; + protected static Map mockStaticContexts = new HashMap<>(); + + static { + topicsToSerDeClassName.put("test_src", DEFAULT_SERDE); + 
topicsToSerDeClassName.put("persistent://public/default/test_src", TopicSchema.DEFAULT_SERDE); + } + + protected PulsarWorkerService mockedWorkerService; + protected PulsarAdmin mockedPulsarAdmin; + protected Tenants mockedTenants; + protected Namespaces mockedNamespaces; + protected Functions mockedFunctions; + protected TenantInfoImpl mockedTenantInfo; + protected List namespaceList = new LinkedList<>(); + protected FunctionMetaDataManager mockedManager; + protected FunctionRuntimeManager mockedFunctionRunTimeManager; + protected RuntimeFactory mockedRuntimeFactory; + protected Namespace mockedNamespace; + protected InputStream mockedInputStream; + protected FormDataContentDisposition mockedFormData; + protected Function.FunctionMetaData mockedFunctionMetaData; + protected LeaderService mockedLeaderService; + protected Packages mockedPackages; + protected PulsarFunctionTestTemporaryDirectory tempDirectory; + protected ConnectorsManager connectorsManager = new ConnectorsManager(); + protected FunctionsManager functionsManager = new FunctionsManager(); + + public static File getPulsarIOCassandraNar() { + return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH) + , "pulsar-io-cassandra.nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH + " system property")); + } + + public static File getPulsarIOTwitterNar() { + return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH) + , "pulsar-io-twitter.nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH + " system property")); + } + + public static File getPulsarIOInvalidNar() { + return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH) + , "invalid nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH + " system property")); + } + + public static File getPulsarApiExamplesNar() { 
+ return new File(Objects.requireNonNull( + System.getProperty(SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH) + , "pulsar-functions-api-examples.nar file location must be specified with " + + SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH + " system property")); + } + + @BeforeMethod + public final void setup(Method method) throws Exception { + this.mockedManager = mock(FunctionMetaDataManager.class); + this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); + this.mockedRuntimeFactory = mock(RuntimeFactory.class); + this.mockedInputStream = mock(InputStream.class); + this.mockedNamespace = mock(Namespace.class); + this.mockedFormData = mock(FormDataContentDisposition.class); + when(mockedFormData.getFileName()).thenReturn("test"); + this.mockedTenantInfo = mock(TenantInfoImpl.class); + this.mockedPulsarAdmin = mock(PulsarAdmin.class); + this.mockedTenants = mock(Tenants.class); + this.mockedNamespaces = mock(Namespaces.class); + this.mockedFunctions = mock(Functions.class); + this.mockedLeaderService = mock(LeaderService.class); + this.mockedPackages = mock(Packages.class); + namespaceList.add(tenant + "/" + namespace); + + this.mockedWorkerService = mock(PulsarWorkerService.class); + when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); + when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); + when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); + when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); + when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); + when(mockedWorkerService.isInitialized()).thenReturn(true); + when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); + when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); + when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); + 
when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); + when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); + when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); + when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); + when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); + when(mockedLeaderService.isLeader()).thenReturn(true); + doAnswer(invocationOnMock -> { + Files.copy(getDefaultNarFile().toPath(), Paths.get(invocationOnMock.getArgument(1, String.class)), + StandardCopyOption.REPLACE_EXISTING); + return null; + }).when(mockedPackages).download(any(), any()); + + // worker config + List urlPatterns = + List.of("http://localhost.*", "file:.*", "https://repo1.maven.org/maven2/org/apache/pulsar/.*"); + WorkerConfig workerConfig = new WorkerConfig() + .setWorkerId("test") + .setWorkerPort(8080) + .setFunctionMetadataTopicName("pulsar/functions") + .setNumFunctionPackageReplicas(3) + .setPulsarServiceUrl("pulsar://localhost:6650/") + .setAdditionalEnabledFunctionsUrlPatterns(urlPatterns) + .setAdditionalEnabledConnectorUrlPatterns(urlPatterns) + .setFunctionsWorkerEnablePackageManagement(true); + customizeWorkerConfig(workerConfig, method); + tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); + tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); + when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); + when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); + PackageUrlValidator packageUrlValidator = new PackageUrlValidator(workerConfig); + when(mockedWorkerService.getPackageUrlValidator()).thenReturn(packageUrlValidator); + + doSetup(); + } + + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + + } + + protected File getDefaultNarFile() { + return getPulsarIOTwitterNar(); + } + + protected 
void doSetup() throws Exception { + + } + + @AfterMethod(alwaysRun = true) + public void cleanup() { + if (tempDirectory != null) { + tempDirectory.delete(); + } + mockStaticContexts.values().forEach(MockedStatic::close); + mockStaticContexts.clear(); + } + + protected void mockStatic(Class classStatic, Consumer> consumer) { + mockStatic(classStatic, withSettings().defaultAnswer(Mockito.CALLS_REAL_METHODS), consumer); + } + + private void mockStatic(Class classStatic, MockSettings mockSettings, Consumer> consumer) { + final MockedStatic mockedStatic = mockStaticContexts.computeIfAbsent(classStatic.getName(), + name -> Mockito.mockStatic(classStatic, mockSettings)); + consumer.accept(mockedStatic); + } + + protected void mockWorkerUtils() { + mockWorkerUtils(null); + } + + protected void mockWorkerUtils(Consumer> consumer) { + mockStatic(WorkerUtils.class, withSettings(), ctx -> { + // make dumpToTmpFile return the nar file copy + ctx.when(() -> WorkerUtils.dumpToTmpFile(mockedInputStream)) + .thenAnswer(invocation -> { + Path tempFile = Files.createTempFile("test", ".nar"); + Files.copy(getPulsarApiExamplesNar().toPath(), tempFile, + StandardCopyOption.REPLACE_EXISTING); + return tempFile.toFile(); + }); + ctx.when(() -> WorkerUtils.dumpToTmpFile(any())) + .thenAnswer(Answers.CALLS_REAL_METHODS); + if (consumer != null) { + consumer.accept(ctx); + } + }); + } + + protected void mockInstanceUtils() { + mockStatic(InstanceUtils.class, ctx -> { + ctx.when(() -> InstanceUtils.calculateSubjectType(any())) + .thenReturn(getComponentType()); + }); + } + + protected abstract Function.FunctionDetails.ComponentType getComponentType(); + + public static class LoadedConnector extends Connector { + public LoadedConnector(ConnectorDefinition connectorDefinition) { + super(null, connectorDefinition, null, true); + } + + @Override + public ValidatableFunctionPackage getConnectorFunctionPackage() { + return new LoadedFunctionPackage(getClass().getClassLoader(), 
ConnectorDefinition.class, + getConnectorDefinition()); + } + } + + + protected void registerBuiltinConnector(String connectorType, String className) { + ConnectorDefinition connectorDefinition = null; + if (className != null) { + connectorDefinition = new ConnectorDefinition(); + // set source and sink class to the same to simplify the test + connectorDefinition.setSinkClass(className); + connectorDefinition.setSourceClass(className); + } + connectorsManager.addConnector(connectorType, new LoadedConnector(connectorDefinition)); + } + + protected void registerBuiltinConnector(String connectorType, File packageFile) { + ConnectorDefinition cntDef; + try { + cntDef = ConnectorUtils.getConnectorDefinition(packageFile); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + connectorsManager.addConnector(connectorType, + new Connector(packageFile.toPath(), cntDef, NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR, true)); + } + + public static class LoadedFunctionArchive extends FunctionArchive { + public LoadedFunctionArchive(FunctionDefinition functionDefinition) { + super(null, functionDefinition, null, true); + } + + @Override + public ValidatableFunctionPackage getFunctionPackage() { + return new LoadedFunctionPackage(getClass().getClassLoader(), FunctionDefinition.class, + getFunctionDefinition()); + } + } + + protected void registerBuiltinFunction(String functionType, String className) { + FunctionDefinition functionDefinition = null; + if (className != null) { + functionDefinition = new FunctionDefinition(); + functionDefinition.setFunctionClass(className); + } + functionsManager.addFunction(functionType, new LoadedFunctionArchive(functionDefinition)); + } + + protected void registerBuiltinFunction(String functionType, File packageFile) { + FunctionDefinition cntDef; + try { + cntDef = FunctionUtils.getFunctionDefinition(packageFile); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + functionsManager.addFunction(functionType, + new 
FunctionArchive(packageFile.toPath(), cntDef, NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR, true)); + } +} diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java index 0c20083bb89ca..a1a418460be45 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java @@ -18,1791 +18,168 @@ */ package org.apache.pulsar.functions.worker.rest.api.v3; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.collect.Lists; import java.io.File; -import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.net.URL; -import java.nio.file.Paths; -import java.util.HashMap; -import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.UUID; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; import org.apache.distributedlog.api.namespace.Namespace; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; 
import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.Packages; -import org.apache.pulsar.client.admin.PulsarAdmin; -import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; import org.apache.pulsar.common.functions.FunctionConfig; import org.apache.pulsar.common.functions.UpdateOptionsImpl; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.RestException; -import org.apache.pulsar.functions.api.Context; -import org.apache.pulsar.functions.api.Function; -import org.apache.pulsar.functions.instance.InstanceUtils; -import org.apache.pulsar.functions.proto.Function.FunctionDetails; -import org.apache.pulsar.functions.proto.Function.FunctionMetaData; -import org.apache.pulsar.functions.proto.Function.PackageLocationMetaData; -import org.apache.pulsar.functions.proto.Function.ProcessingGuarantees; -import org.apache.pulsar.functions.proto.Function.SinkSpec; -import org.apache.pulsar.functions.proto.Function.SourceSpec; -import org.apache.pulsar.functions.proto.Function.SubscriptionType; -import org.apache.pulsar.functions.runtime.RuntimeFactory; -import org.apache.pulsar.functions.source.TopicSchema; -import org.apache.pulsar.functions.utils.FunctionCommon; +import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.utils.FunctionConfigUtils; -import org.apache.pulsar.functions.utils.functions.FunctionArchive; -import org.apache.pulsar.functions.utils.io.Connector; -import org.apache.pulsar.functions.worker.ConnectorsManager; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import 
org.apache.pulsar.functions.worker.FunctionsManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.rest.api.FunctionsImpl; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; -import org.apache.pulsar.functions.worker.rest.api.v2.FunctionsApiV2Resource; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; -import org.mockito.MockedStatic; -import org.mockito.Mockito; import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -/** - * Unit test of {@link FunctionsApiV2Resource}. - */ -public class FunctionApiV3ResourceTest { - - private static final class TestFunction implements Function { - - @Override - public String process(String input, Context context) { - return input; - } - } - - private static final class WrongFunction implements Consumer { - @Override - public void accept(String s) { - - } - } - - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; - private static final String function = "test-function"; - private static final String outputTopic = "test-output-topic"; - private static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; - private static final String className = TestFunction.class.getName(); - private SubscriptionType subscriptionType = SubscriptionType.FAILOVER; - private static final Map topicsToSerDeClassName = new HashMap<>(); - static { - topicsToSerDeClassName.put("persistent://public/default/test_src", TopicSchema.DEFAULT_SERDE); - } - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants 
mockedTenants; - private Namespaces mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; +public class FunctionApiV3ResourceTest extends AbstractFunctionApiResourceTest { private FunctionsImpl resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetadata; - private LeaderService mockedLeaderService; - private Packages mockedPackages; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - private static Map mockStaticContexts = new HashMap<>(); - - private static final String SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH = - "pulsar-functions-api-examples.nar.path"; - - public static File getPulsarApiExamplesNar() { - return new File(Objects.requireNonNull( - System.getProperty(SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH) - , "pulsar-functions-api-examples.nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_FUNCTIONS_API_EXAMPLES_NAR_FILE_PATH + " system property")); - } - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); - this.mockedTenantInfo = mock(TenantInfoImpl.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - 
this.mockedFunctions = mock(Functions.class); - this.mockedPackages = mock(Packages.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedFunctionMetadata = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetadata); - doNothing().when(mockedPackages).download(anyString(), anyString()); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = 
PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); - + @Override + protected void doSetup() { + super.doSetup(); this.resource = spy(new FunctionsImpl(() -> mockedWorkerService)); } - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); - } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); - } - - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = mockStaticContexts.computeIfAbsent(classStatic.getName(), name -> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); - } - - private void mockWorkerUtils() { - mockWorkerUtils(null); - } - - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); - } - - private void mockInstanceUtils() { - mockStatic(InstanceUtils.class, ctx -> { - ctx.when(() -> InstanceUtils.calculateSubjectType(any())) - .thenReturn(FunctionDetails.ComponentType.FUNCTION); - }); - } - - // - // Register Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testRegisterFunctionMissingTenant() { - try { - testRegisterFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void 
testRegisterFunctionMissingNamespace() { - try { - testRegisterFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testRegisterFunctionMissingFunctionName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package is not provided") - public void testRegisterFunctionMissingPackage() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "No input topic\\(s\\) specified for the function") - public void testRegisterFunctionMissingInputTopics() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = 
RestException.class, expectedExceptionsMessageRegExp = "Function Package is not provided") - public void testRegisterFunctionMissingPackageDetails() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function package does not have" - + " the correct format. Pulsar cannot determine if the package is a NAR package or JAR package. Function " - + "classname is not provided and attempts to load it as a NAR package produced the following error.*") - public void testRegisterFunctionMissingClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class UnknownClass must be in class path") - public void testRegisterFunctionWrongClassName() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - "UnknownClass", - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function parallelism must be a positive number") - public void testRegisterFunctionWrongParallelism() { - try { - 
testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - -2, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, - expectedExceptionsMessageRegExp = "Output topic persistent://public/default/test_src is also being used as an input topic \\(topics must be one or the other\\)") - public void testRegisterFunctionSameInputOutput() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - topicsToSerDeClassName.keySet().iterator().next(), - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Output topic " + function + "-output-topic/test:" + " is invalid") - public void testRegisterFunctionWrongOutputTopic() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - function + "-output-topic/test:", - outputSerdeClassName, - className, - parallelism, - null); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Encountered error .*. 
when getting Function package from .*") - public void testRegisterFunctionHttpUrl() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - null, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "http://localhost:1234/test"); - } catch (RestException re){ - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function class .*. does not implement the correct interface") - public void testRegisterFunctionImplementWrongInterface() { - try { - testRegisterFunctionMissingArguments( - tenant, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - WrongFunction.class.getName(), - parallelism, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testRegisterFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String functionPkgUrl) { - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if (topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) 
{ - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - - resource.registerFunction( - tenant, - namespace, - function, - inputStream, - details, - functionPkgUrl, - functionConfig, - null); - - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function config is not provided") - public void testMissingFunctionConfig() { - resource.registerFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - null, - null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function config is not provided") - public void testUpdateMissingFunctionConfig() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - resource.updateFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - null, - null, - null, null); - } - - @Test - public void testUpdateSourceWithNoChange() throws ClassNotFoundException { - mockWorkerUtils(); - - FunctionDetails functionDetails = createDefaultFunctionDetails(); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getFunctionTypes(any(FunctionConfig.class), any(Class.class))).thenReturn(new Class[]{String.class, String.class}); - ctx.when(() -> FunctionCommon.convertRuntime(any(FunctionConfig.Runtime.class))).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(),any(),any(),any())).thenCallRealMethod(); - ctx.when(FunctionCommon::createPkgTempFile).thenCallRealMethod(); - }); - - doReturn(Function.class).when(mockedClassLoader).loadClass(anyString()); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - 
.classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedFunctionsManager.getFunctionArchive(any())).thenReturn(getPulsarApiExamplesNar().toPath()); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - // No change on config, - FunctionConfig funcConfig = createDefaultFunctionConfig(); - mockStatic(FunctionConfigUtils.class, ctx -> { - ctx.when(() -> FunctionConfigUtils.convertFromDetails(any())).thenReturn(funcConfig); - ctx.when(() -> FunctionConfigUtils.validateUpdate(any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionConfigUtils.convert(any(FunctionConfig.class), any(ClassLoader.class))).thenReturn(functionDetails); - ctx.when(() -> FunctionConfigUtils.convert(any(FunctionConfig.class), any(FunctionConfigUtils.ExtractedFunctionDetails.class))).thenReturn(functionDetails); - ctx.when(() -> FunctionConfigUtils.validateJavaFunction(any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionConfigUtils.doCommonChecks(any())).thenCallRealMethod(); - ctx.when(() -> FunctionConfigUtils.collectAllInputTopics(any())).thenCallRealMethod(); - ctx.when(() -> FunctionConfigUtils.doJavaChecks(any(), any())).thenCallRealMethod(); - }); - - // config has not changes and don't update auth, should fail - try { - resource.updateFunction( - funcConfig.getTenant(), - funcConfig.getNamespace(), - funcConfig.getName(), - null, - mockedFormData, - null, - funcConfig, - null, - null); - fail("Update without changes should fail"); - } catch (RestException e) { - assertTrue(e.getMessage().contains("Update contains no change")); - } - - try { - UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); - updateOptions.setUpdateAuthData(false); - resource.updateFunction( - funcConfig.getTenant(), - funcConfig.getNamespace(), - funcConfig.getName(), - null, 
- mockedFormData, - null, - funcConfig, - null, - updateOptions); - fail("Update without changes should fail"); - } catch (RestException e) { - assertTrue(e.getMessage().contains("Update contains no change")); - } - - // no changes but set the auth-update flag to true, should not fail - UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); - updateOptions.setUpdateAuthData(true); - resource.updateFunction( - funcConfig.getTenant(), - funcConfig.getNamespace(), - funcConfig.getName(), - null, - mockedFormData, - null, - funcConfig, - null, - updateOptions); - } - - - private void registerDefaultFunction() { - registerDefaultFunctionWithPackageUrl(null); - } - - private void registerDefaultFunctionWithPackageUrl(String packageUrl) { - FunctionConfig functionConfig = createDefaultFunctionConfig(); - resource.registerFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - packageUrl, - functionConfig, - null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function already exists") - public void testRegisterExistedFunction() { - try { - Configurator.setRootLevel(Level.DEBUG); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testRegisterFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> { - WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class)); - } - ).thenThrow(new IOException("upload failure")); - ; - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } catch (RestException re) { - 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testRegisterFunctionSuccess() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(timeOut = 20000) - public void testRegisterFunctionSuccessWithPackageName() { - registerDefaultFunctionWithPackageUrl("function://public/default/test@v1"); - } - - @Test(timeOut = 20000) - public void testRegisterFunctionFailedWithWrongPackageName() throws PulsarAdminException { - try { - doThrow(new PulsarAdminException("package name is invalid")) - .when(mockedPackages).download(anyString(), anyString()); - registerDefaultFunctionWithPackageUrl("function://"); - } catch (RestException e) { - // expected exception - assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace does not exist") - public void testRegisterFunctionNonExistingNamespace() { - try { - this.namespaceList.clear(); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant does not exist") - public void testRegisterFunctionNonexistantTenant() throws Exception { - try { - when(mockedTenants.getTenantInfo(any())).thenThrow(PulsarAdminException.NotFoundException.class); - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = 
"function failed to register") - public void testRegisterFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registration interrupted") - public void testRegisterFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - doThrow(new IllegalStateException("Function registration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - registerDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - /* - Externally managed runtime, - uploadBuiltinSinksSources == false - Make sure uploadFileToBookkeeper is not called - */ - @Test - public void testRegisterFunctionSuccessK8sNoUpload() throws Exception { - mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(false); - - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new RuntimeException("uploadFileToBookkeeper triggered")); - - }); - - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getFunctionTypes(any(FunctionConfig.class), any(Class.class))).thenReturn(new Class[]{String.class, 
String.class}); - ctx.when(() -> FunctionCommon.convertRuntime(any(FunctionConfig.Runtime.class))).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - - }); - - doReturn(Function.class).when(mockedClassLoader).loadClass(anyString()); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedFunctionsManager.getFunctionArchive(any())).thenReturn(getPulsarApiExamplesNar().toPath()); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); - - when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - FunctionConfig functionConfig = createDefaultFunctionConfig(); - functionConfig.setJar("builtin://exclamation"); - - try (FileInputStream inputStream = new FileInputStream(getPulsarApiExamplesNar())) { - resource.registerFunction( - tenant, - namespace, - function, - inputStream, - mockedFormData, - null, - functionConfig, - null); - } - } - - /* - Externally managed runtime, - uploadBuiltinSinksSources == true - Make sure uploadFileToBookkeeper is called - */ - @Test - public void testRegisterFunctionSuccessK8sWithUpload() throws Exception { - final String injectedErrMsg = "uploadFileToBookkeeper triggered"; - mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(true); - - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class))) - .thenThrow(new RuntimeException(injectedErrMsg)); - - }); - - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> 
FunctionCommon.getFunctionTypes(any(FunctionConfig.class), any(Class.class))).thenReturn(new Class[]{String.class, String.class}); - ctx.when(() -> FunctionCommon.convertRuntime(any(FunctionConfig.Runtime.class))).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - }); - - doReturn(Function.class).when(mockedClassLoader).loadClass(anyString()); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedFunctionsManager.getFunctionArchive(any())).thenReturn(getPulsarApiExamplesNar().toPath()); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); - - when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - - FunctionConfig functionConfig = createDefaultFunctionConfig(); - functionConfig.setJar("builtin://exclamation"); - - try (FileInputStream inputStream = new FileInputStream(getPulsarApiExamplesNar())) { - try { - resource.registerFunction( - tenant, - namespace, - function, - inputStream, - mockedFormData, - null, - functionConfig, - null); - Assert.fail(); - } catch (RuntimeException e) { - Assert.assertEquals(e.getMessage(), injectedErrMsg); - } - } - } - - // - // Update Functions - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testUpdateFunctionMissingTenant() throws Exception { - try { - testUpdateFunctionMissingArguments( - null, - namespace, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Tenant is not provided"); - } catch (RestException re) { - 
assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testUpdateFunctionMissingNamespace() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - null, - function, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Namespace is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testUpdateFunctionMissingFunctionName() throws Exception { - try { - testUpdateFunctionMissingArguments( - tenant, - namespace, - null, - mockedInputStream, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Function name is not provided"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingPackage() throws Exception { - try { - mockWorkerUtils(); - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingInputTopic() throws Exception { - try { - mockWorkerUtils(); 
- - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - className, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Update contains no change") - public void testUpdateFunctionMissingClassName() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Update contains no change"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test - public void testUpdateFunctionChangedParallelism() throws Exception { - try { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism + 1, - null); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test - public void testUpdateFunctionChangedInputs() throws Exception { - mockWorkerUtils(); - - testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - topicsToSerDeClassName, - mockedFormData, - "DifferentOutput", - outputSerdeClassName, - null, - parallelism, - null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Input Topics cannot be altered") - public void testUpdateFunctionChangedOutput() throws Exception { - try { - mockWorkerUtils(); - - Map someOtherInput = new HashMap<>(); - someOtherInput.put("DifferentTopic", TopicSchema.DEFAULT_SERDE); - 
testUpdateFunctionMissingArguments( - tenant, - namespace, - function, - null, - someOtherInput, - mockedFormData, - outputTopic, - outputSerdeClassName, - null, - parallelism, - "Input Topics cannot be altered"); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testUpdateFunctionMissingArguments( - String tenant, - String namespace, - String function, - InputStream inputStream, - Map topicsToSerDeClassName, - FormDataContentDisposition details, - String outputTopic, - String outputSerdeClassName, - String className, - Integer parallelism, - String expectedError) throws Exception { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - FunctionConfig functionConfig = new FunctionConfig(); - if (tenant != null) { - functionConfig.setTenant(tenant); - } - if (namespace != null) { - functionConfig.setNamespace(namespace); - } - if (function != null) { - functionConfig.setName(function); - } - if (topicsToSerDeClassName != null) { - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - } - if (outputTopic != null) { - functionConfig.setOutput(outputTopic); - } - if (outputSerdeClassName != null) { - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - } - if (className != null) { - functionConfig.setClassName(className); - } - if (parallelism != null) { - functionConfig.setParallelism(parallelism); - } - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - - if (expectedError != null) { - doThrow(new IllegalArgumentException(expectedError)) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - } - - resource.updateFunction( - tenant, - namespace, - function, - inputStream, - details, - null, - functionConfig, - null, null); - - } - - private void updateDefaultFunction() { - updateDefaultFunctionWithPackageUrl(null); - } - - private void 
updateDefaultFunctionWithPackageUrl(String packageUrl) { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - - resource.updateFunction( - tenant, - namespace, - function, - mockedInputStream, - mockedFormData, - packageUrl, - functionConfig, - null, null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testUpdateNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "upload failure") - public void testUpdateFunctionUploadFailure() throws Exception { - try { - mockWorkerUtils(ctx -> { - ctx.when(() -> { - WorkerUtils.uploadFileToBookkeeper( - anyString(), - any(File.class), - any(Namespace.class)); - - }).thenThrow(new IOException("upload failure")); - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - }); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - @Test - public void testUpdateFunctionSuccess() throws Exception { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), 
eq(namespace), eq(function))).thenReturn(true); - - updateDefaultFunction(); - } - - @Test - public void testUpdateFunctionWithUrl() { - Configurator.setRootLevel(Level.DEBUG); - - String fileLocation = FutureUtil.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - String filePackageUrl = "file://" + fileLocation; - - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - resource.updateFunction( - tenant, - namespace, - function, - null, - null, - filePackageUrl, - functionConfig, - null, null); - - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to register") - public void testUpdateFunctionFailure() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to register")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function registeration interrupted") - public void testUpdateFunctionInterrupted() throws Exception { - try { - mockWorkerUtils(); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), 
eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function registeration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - updateDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - - @Test(timeOut = 20000) - public void testUpdateFunctionSuccessWithPackageName() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - updateDefaultFunctionWithPackageUrl("function://public/default/test@v1"); - } - - @Test(timeOut = 20000) - public void testUpdateFunctionFailedWithWrongPackageName() throws PulsarAdminException { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - try { - doThrow(new PulsarAdminException("package name is invalid")) - .when(mockedPackages).download(anyString(), anyString()); - registerDefaultFunctionWithPackageUrl("function://"); - } catch (RestException e) { - // expected exception - assertEquals(e.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - } - } - - // - // deregister function - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testDeregisterFunctionMissingTenant() { - try { - - testDeregisterFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testDeregisterFunctionMissingNamespace() { - try { - testDeregisterFunctionMissingArguments( - tenant, - null, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - 
@Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testDeregisterFunctionMissingFunctionName() { - try { - testDeregisterFunctionMissingArguments( - tenant, - namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testDeregisterFunctionMissingArguments( - String tenant, - String namespace, - String function - ) { - resource.deregisterFunction( - tenant, - namespace, - function, - null); - } - - private void deregisterDefaultFunction() { - resource.deregisterFunction( - tenant, - namespace, - function, - null); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testDeregisterNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); - throw re; - } - } - - @Test - public void testDeregisterFunctionSuccess() { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - deregisterDefaultFunction(); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "function failed to deregister") - public void testDeregisterFunctionFailure() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalArgumentException("function failed to deregister")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - 
@Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function deregisteration interrupted") - public void testDeregisterFunctionInterrupted() throws Exception { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - doThrow(new IllegalStateException("Function deregisteration interrupted")) - .when(mockedManager).updateFunctionOnLeader(any(FunctionMetaData.class), Mockito.anyBoolean()); - - deregisterDefaultFunction(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); - throw re; - } - } - - // - // Get Function Info - // - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testGetFunctionMissingTenant() { - try { - testGetFunctionMissingArguments( - null, - namespace, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testGetFunctionMissingNamespace() { - try { - testGetFunctionMissingArguments( - tenant, - null, - function - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function name is not provided") - public void testGetFunctionMissingFunctionName() { - try { - testGetFunctionMissingArguments( - tenant, - namespace, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } - } - - private void testGetFunctionMissingArguments( - String tenant, - String namespace, - String function - ) { - resource.getFunctionInfo( - tenant, - namespace, - function,null - ); - - } - - 
private FunctionConfig getDefaultFunctionInfo() { - return resource.getFunctionInfo( - tenant, - namespace, - function, - null - ); - } - - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't exist") - public void testGetNotExistedFunction() { - try { - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - getDefaultFunctionInfo(); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); - throw re; - } - } - - @Test - public void testGetFunctionSuccess() { - mockInstanceUtils(); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - - SinkSpec sinkSpec = SinkSpec.newBuilder() - .setTopic(outputTopic) - .setSerDeClassName(outputSerdeClassName).build(); - FunctionDetails functionDetails = FunctionDetails.newBuilder() - .setClassName(className) - .setSink(sinkSpec) - .setAutoAck(true) - .setName(function) - .setNamespace(namespace) - .setProcessingGuarantees(ProcessingGuarantees.ATMOST_ONCE) - .setTenant(tenant) - .setParallelism(parallelism) - .setSource(SourceSpec.newBuilder().setSubscriptionType(subscriptionType) - .putAllTopicsToSerDeClassName(topicsToSerDeClassName)).build(); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setCreateTime(System.currentTimeMillis()) - .setFunctionDetails(functionDetails) - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) - .setVersion(1234) - .build(); - when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - - FunctionConfig functionConfig = getDefaultFunctionInfo(); - assertEquals( - FunctionConfigUtils.convertFromDetails(functionDetails), - functionConfig); - } - - // - // List Functions - // + protected void registerFunction(String tenant, String namespace, String function, InputStream inputStream, + 
FormDataContentDisposition details, String functionPkgUrl, FunctionConfig functionConfig) { + resource.registerFunction( + tenant, + namespace, + function, + inputStream, + details, + functionPkgUrl, + functionConfig, + null); + } + protected void updateFunction(String tenant, + String namespace, + String functionName, + InputStream uploadedInputStream, + FormDataContentDisposition fileDetail, + String functionPkgUrl, + FunctionConfig functionConfig, + AuthenticationParameters authParams, + UpdateOptionsImpl updateOptions) { + resource.updateFunction(tenant, namespace, functionName, uploadedInputStream, fileDetail, functionPkgUrl, + functionConfig, authParams, updateOptions); + } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") - public void testListFunctionsMissingTenant() { - try { - testListFunctionsMissingArguments( - null, - namespace - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; - } + protected StreamingOutput downloadFunction(String tenant, String namespace, String componentName, + AuthenticationParameters authParams, boolean transformFunction) { + return resource.downloadFunction(tenant, namespace, componentName, authParams, transformFunction); } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Namespace is not provided") - public void testListFunctionsMissingNamespace() { - try { - testListFunctionsMissingArguments( - tenant, - null - ); - } catch (RestException re) { - assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); - throw re; + protected File downloadFunction(final String path, final AuthenticationParameters authParams) throws IOException { + StreamingOutput streamingOutput = resource.downloadFunction(path, authParams); + File pkgFile = File.createTempFile("testpkg", "nar"); + try(OutputStream output = new FileOutputStream(pkgFile)) { + 
streamingOutput.write(output); } + return pkgFile; } - private void testListFunctionsMissingArguments( + protected void testDeregisterFunctionMissingArguments( String tenant, - String namespace + String namespace, + String function ) { - resource.listFunctions( - tenant, - namespace,null - ); - - } - - private List listDefaultFunctions() { - return resource.listFunctions( - tenant, - namespace,null - ); - } - - @Test - public void testListFunctionsSuccess() { - mockInstanceUtils(); - final List functions = Lists.newArrayList("test-1", "test-2"); - final List metaDataList = new LinkedList<>(); - FunctionMetaData functionMetaData1 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-1").build() - ).build(); - FunctionMetaData functionMetaData2 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder().setName("test-2").build() - ).build(); - metaDataList.add(functionMetaData1); - metaDataList.add(functionMetaData2); - when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(metaDataList); - - List functionList = listDefaultFunctions(); - assertEquals(functions, functionList); + resource.deregisterFunction( + tenant, + namespace, + function, + null); } - @Test - public void testOnlyGetSources() { - List functions = Lists.newArrayList("test-2"); - List functionMetaDataList = new LinkedList<>(); - FunctionMetaData f1 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder() - .setName("test-1") - .setComponentType(FunctionDetails.ComponentType.SOURCE) - .build()).build(); - functionMetaDataList.add(f1); - FunctionMetaData f2 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder() - .setName("test-2") - .setComponentType(FunctionDetails.ComponentType.FUNCTION) - .build()).build(); - functionMetaDataList.add(f2); - FunctionMetaData f3 = FunctionMetaData.newBuilder().setFunctionDetails( - FunctionDetails.newBuilder() - .setName("test-3") - 
.setComponentType(FunctionDetails.ComponentType.SINK) - .build()).build(); - functionMetaDataList.add(f3); - when(mockedManager.listFunctions(eq(tenant), eq(namespace))).thenReturn(functionMetaDataList); - - List functionList = listDefaultFunctions(); - assertEquals(functions, functionList); + protected void deregisterDefaultFunction() { + resource.deregisterFunction( + tenant, + namespace, + function, + null); } - @Test - public void testDownloadFunctionHttpUrl() throws Exception { - String jarHttpUrl = - "https://repo1.maven.org/maven2/org/apache/pulsar/pulsar-common/2.4.2/pulsar-common-2.4.2.jar"; - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + protected void testGetFunctionMissingArguments( + String tenant, + String namespace, + String function + ) { + resource.getFunctionInfo( + tenant, + namespace, + function, null + ); - StreamingOutput streamOutput = resource.downloadFunction(jarHttpUrl, null); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - Assert.assertTrue(pkgFile.exists()); - pkgFile.delete(); } - @Test - public void testDownloadFunctionFile() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - - StreamingOutput streamOutput = resource.downloadFunction("file:///" + fileLocation, null); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - Assert.assertTrue(pkgFile.exists()); - Assert.assertEquals(file.length(), pkgFile.length()); - pkgFile.delete(); + protected FunctionConfig getDefaultFunctionInfo() { + 
return resource.getFunctionInfo( + tenant, + namespace, + function, + null + ); } - @Test - public void testDownloadFunctionBuiltinConnector() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - - WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); - when(mockedWorkerService.getWorkerConfig()).thenReturn(config); - - Connector connector = Connector.builder().archivePath(file.toPath()).build(); - ConnectorsManager connectorsManager = mock(ConnectorsManager.class); - when(connectorsManager.getConnector("cassandra")).thenReturn(connector); - when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); - - StreamingOutput streamOutput = resource.downloadFunction("builtin://cassandra", null); + protected void testListFunctionsMissingArguments( + String tenant, + String namespace + ) { + resource.listFunctions( + tenant, + namespace, null + ); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - output.flush(); - output.close(); - Assert.assertTrue(pkgFile.exists()); - Assert.assertTrue(pkgFile.exists()); - Assert.assertEquals(file.length(), pkgFile.length()); - pkgFile.delete(); } - @Test - public void testDownloadFunctionBuiltinFunction() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); - - WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); - when(mockedWorkerService.getWorkerConfig()).thenReturn(config); - - FunctionsManager functionsManager = 
mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); - when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); - when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); - - StreamingOutput streamOutput = resource.downloadFunction("builtin://exclamation", null); - - File pkgFile = new File(testDir, UUID.randomUUID().toString()); - OutputStream output = new FileOutputStream(pkgFile); - streamOutput.write(output); - output.flush(); - output.close(); - Assert.assertTrue(pkgFile.exists()); - Assert.assertEquals(file.length(), pkgFile.length()); - pkgFile.delete(); + protected List listDefaultFunctions() { + return resource.listFunctions( + tenant, + namespace, null + ); } @Test public void testDownloadFunctionBuiltinConnectorByName() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + File file = getPulsarApiExamplesNar(); WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(config); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("builtin://cassandra")) - .setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) - .setFunctionDetails(FunctionDetails.newBuilder().setComponentType(FunctionDetails.ComponentType.SINK)) + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + 
.setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("builtin://cassandra")) + .setTransformFunctionPackageLocation( + Function.PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setFunctionDetails(Function.FunctionDetails.newBuilder().setComponentType(Function.FunctionDetails.ComponentType.SINK)) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - Connector connector = Connector.builder().archivePath(file.toPath()).build(); - ConnectorsManager connectorsManager = mock(ConnectorsManager.class); - when(connectorsManager.getConnector("cassandra")).thenReturn(connector); - when(mockedWorkerService.getConnectorsManager()).thenReturn(connectorsManager); + registerBuiltinConnector("cassandra", file); - StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, + StreamingOutput streamOutput = downloadFunction(tenant, namespace, function, AuthenticationParameters.builder().build(), false); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); + File pkgFile = File.createTempFile("testpkg", "nar"); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); @@ -1812,31 +189,27 @@ public void testDownloadFunctionBuiltinConnectorByName() throws Exception { @Test public void testDownloadFunctionBuiltinFunctionByName() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + File file = getPulsarApiExamplesNar(); WorkerConfig config = new WorkerConfig() - .setUploadBuiltinSinksSources(false); + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(config); when(mockedManager.containsFunction(eq(tenant), eq(namespace), 
eq(function))).thenReturn(true); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("builtin://exclamation")) - .setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) - .setFunctionDetails(FunctionDetails.newBuilder().setComponentType(FunctionDetails.ComponentType.FUNCTION)) - .build(); + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("builtin://exclamation")) + .setTransformFunctionPackageLocation( + Function.PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setFunctionDetails( + Function.FunctionDetails.newBuilder().setComponentType(Function.FunctionDetails.ComponentType.FUNCTION)) + .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - FunctionsManager functionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); - when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); - when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + registerBuiltinFunction("exclamation", file); - StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, + StreamingOutput streamOutput = downloadFunction(tenant, namespace, function, AuthenticationParameters.builder().build(), false); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); + File pkgFile = File.createTempFile("testpkg", "nar"); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); @@ -1846,32 +219,26 @@ public void testDownloadFunctionBuiltinFunctionByName() throws 
Exception { @Test public void testDownloadTransformFunctionByName() throws Exception { - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); + File file = getPulsarApiExamplesNar(); WorkerConfig workerConfig = new WorkerConfig() - .setUploadBuiltinSinksSources(false); + .setUploadBuiltinSinksSources(false); when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - FunctionMetaData metaData = FunctionMetaData.newBuilder() - .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) - .setTransformFunctionPackageLocation(PackageLocationMetaData.newBuilder() + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("http://invalid")) + .setTransformFunctionPackageLocation(Function.PackageLocationMetaData.newBuilder() .setPackagePath("builtin://exclamation")) .build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); - FunctionsManager functionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder().archivePath(file.toPath()).build(); - when(functionsManager.getFunction("exclamation")).thenReturn(functionArchive); - when(mockedWorkerService.getConnectorsManager()).thenReturn(mock(ConnectorsManager.class)); - when(mockedWorkerService.getFunctionsManager()).thenReturn(functionsManager); + registerBuiltinFunction("exclamation", file); - StreamingOutput streamOutput = resource.downloadFunction(tenant, namespace, function, + StreamingOutput streamOutput = downloadFunction(tenant, namespace, function, AuthenticationParameters.builder().build(), 
true); - File pkgFile = new File(testDir, UUID.randomUUID().toString()); + File pkgFile = File.createTempFile("testpkg", "nar"); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); Assert.assertTrue(pkgFile.exists()); @@ -1879,15 +246,58 @@ public void testDownloadTransformFunctionByName() throws Exception { pkgFile.delete(); } + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function test-function doesn't" + + " exist") + public void testGetNotExistedFunction() throws IOException { + try { + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); + getDefaultFunctionInfo(); + } catch (RestException re) { + assertEquals(re.getResponse().getStatusInfo(), Response.Status.NOT_FOUND); + throw re; + } + } @Test - public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { + public void testGetFunctionSuccess() throws IOException { + mockInstanceUtils(); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); + + Function.SinkSpec sinkSpec = Function.SinkSpec.newBuilder() + .setTopic(outputTopic) + .setSerDeClassName(outputSerdeClassName).build(); + Function.FunctionDetails functionDetails = Function.FunctionDetails.newBuilder() + .setClassName(className) + .setSink(sinkSpec) + .setAutoAck(true) + .setName(function) + .setNamespace(namespace) + .setProcessingGuarantees(Function.ProcessingGuarantees.ATMOST_ONCE) + .setTenant(tenant) + .setParallelism(parallelism) + .setSource(Function.SourceSpec.newBuilder().setSubscriptionType(subscriptionType) + .putAllTopicsToSerDeClassName(topicsToSerDeClassName)).build(); + Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder() + .setCreateTime(System.currentTimeMillis()) + .setFunctionDetails(functionDetails) + .setPackageLocation(Function.PackageLocationMetaData.newBuilder().setPackagePath("/path/to/package")) + .setVersion(1234) + .build(); + 
when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(function))).thenReturn(metaData); + + FunctionConfig functionConfig = getDefaultFunctionInfo(); + assertEquals( + FunctionConfigUtils.convertFromDetails(functionDetails), + functionConfig); + } + + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function language runtime is " + + "either not set or cannot be determined") + public void testCreateFunctionWithoutSettingRuntime() throws Exception { Configurator.setRootLevel(Level.DEBUG); - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; + File file = getPulsarApiExamplesNar(); + String filePackageUrl = file.toURI().toString(); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); FunctionConfig functionConfig = new FunctionConfig(); @@ -1896,82 +306,135 @@ public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { functionConfig.setName(function); functionConfig.setClassName(className); functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); functionConfig.setOutput(outputTopic); functionConfig.setOutputSerdeClassName(outputSerdeClassName); - resource.registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig, null); + registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig); } @Test - public void testRegisterFunctionWithConflictingFields() throws Exception { - Configurator.setRootLevel(Level.DEBUG); - String actualTenant = "DIFFERENT_TENANT"; - String actualNamespace = "DIFFERENT_NAMESPACE"; - String actualName = "DIFFERENT_NAME"; - this.namespaceList.add(actualTenant + "/" + 
actualNamespace); + public void testUpdateSourceWithNoChange() throws IOException { + mockWorkerUtils(); - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); - when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); + FunctionConfig funcConfig = createDefaultFunctionConfig(); - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - resource.registerFunction(actualTenant, actualNamespace, actualName, null, null, filePackageUrl, functionConfig, - null); + // config has not changes and don't update auth, should fail + try { + updateFunction( + funcConfig.getTenant(), + funcConfig.getNamespace(), + funcConfig.getName(), + null, + mockedFormData, + null, + funcConfig, + null, + null); + fail("Update without changes should fail"); + } catch (RestException e) { + assertThat(e.getMessage()).contains("Update contains no change"); + } + + try { + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(false); + updateFunction( + funcConfig.getTenant(), + funcConfig.getNamespace(), + funcConfig.getName(), + null, + mockedFormData, + null, + funcConfig, + null, + updateOptions); + fail("Update without changes should fail"); + } catch (RestException e) { + 
assertTrue(e.getMessage().contains("Update contains no change")); + } + + // no changes but set the auth-update flag to true, should not fail + UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); + updateOptions.setUpdateAuthData(true); + updateFunction( + funcConfig.getTenant(), + funcConfig.getNamespace(), + funcConfig.getName(), + null, + mockedFormData, + null, + funcConfig, + null, + updateOptions); } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function language runtime is either not set or cannot be determined") - public void testCreateFunctionWithoutSettingRuntime() throws Exception { - Configurator.setRootLevel(Level.DEBUG); + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Function config is not provided") + public void testMissingFunctionConfig() throws IOException { + registerFunction(tenant, namespace, function, mockedInputStream, mockedFormData, null, null); + } + + /* + Externally managed runtime, + uploadBuiltinSinksSources == false + Make sure uploadFileToBookkeeper is not called + */ + @Test + public void testRegisterFunctionSuccessK8sNoUpload() throws Exception { + mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(false); + + mockStatic(WorkerUtils.class, ctx -> { + ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class))) + .thenThrow(new RuntimeException("uploadFileToBookkeeper triggered")); + + }); - URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); - File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath().replace('\\', '/'); - String filePackageUrl = "file:///" + fileLocation; + registerBuiltinFunction("exclamation", getPulsarApiExamplesNar()); + when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - FunctionConfig 
functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - resource.registerFunction(tenant, namespace, function, null, null, filePackageUrl, functionConfig, null); + FunctionConfig functionConfig = createDefaultFunctionConfig(); + functionConfig.setJar("builtin://exclamation"); + registerFunction(tenant, namespace, function, null, mockedFormData, null, functionConfig); } - public static FunctionConfig createDefaultFunctionConfig() { - FunctionConfig functionConfig = new FunctionConfig(); - functionConfig.setTenant(tenant); - functionConfig.setNamespace(namespace); - functionConfig.setName(function); - functionConfig.setClassName(className); - functionConfig.setParallelism(parallelism); - functionConfig.setCustomSerdeInputs(topicsToSerDeClassName); - functionConfig.setOutput(outputTopic); - functionConfig.setOutputSerdeClassName(outputSerdeClassName); - functionConfig.setRuntime(FunctionConfig.Runtime.JAVA); - return functionConfig; - } + /* + Externally managed runtime, + uploadBuiltinSinksSources == true + Make sure uploadFileToBookkeeper is called + */ + @Test + public void testRegisterFunctionSuccessK8sWithUpload() throws Exception { + final String injectedErrMsg = "uploadFileToBookkeeper triggered"; + mockedWorkerService.getWorkerConfig().setUploadBuiltinSinksSources(true); + + mockStatic(WorkerUtils.class, ctx -> { + ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( + anyString(), + any(File.class), + any(Namespace.class))) + .thenThrow(new RuntimeException(injectedErrMsg)); + + }); + + registerBuiltinFunction("exclamation", getPulsarApiExamplesNar()); + 
when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); - public static FunctionDetails createDefaultFunctionDetails() { FunctionConfig functionConfig = createDefaultFunctionConfig(); - return FunctionConfigUtils.convert(functionConfig, (ClassLoader) null); + functionConfig.setJar("builtin://exclamation"); + + try { + registerFunction(tenant, namespace, function, null, mockedFormData, null, functionConfig); + Assert.fail(); + } catch (RuntimeException e) { + Assert.assertEquals(e.getMessage(), injectedErrMsg); + } } + } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java index 5dcc795304ef5..c6c6303e48007 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SinkApiV3ResourceTest.java @@ -18,246 +18,86 @@ */ package org.apache.pulsar.functions.worker.rest.api.v3; +import static org.apache.pulsar.functions.proto.Function.ProcessingGuarantees.ATLEAST_ONCE; +import static org.apache.pulsar.functions.source.TopicSchema.DEFAULT_SERDE; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import com.google.common.collect.Lists; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import 
java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; +import java.lang.reflect.Method; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import org.apache.distributedlog.api.namespace.Namespace; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.Packages; -import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; -import org.apache.pulsar.common.functions.FunctionConfig; import org.apache.pulsar.common.functions.UpdateOptionsImpl; import org.apache.pulsar.common.functions.Utils; import org.apache.pulsar.common.io.SinkConfig; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; -import org.apache.pulsar.functions.api.examples.ExclamationFunction; import org.apache.pulsar.functions.api.examples.RecordFunction; import org.apache.pulsar.functions.api.utils.IdentityFunction; import org.apache.pulsar.functions.instance.InstanceUtils; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.FunctionDetails; import org.apache.pulsar.functions.proto.Function.FunctionMetaData; -import org.apache.pulsar.functions.runtime.RuntimeFactory; -import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.SinkConfigUtils; -import 
org.apache.pulsar.functions.utils.functions.FunctionArchive; -import org.apache.pulsar.functions.utils.functions.FunctionUtils; -import org.apache.pulsar.functions.utils.io.Connector; -import org.apache.pulsar.functions.utils.io.ConnectorUtils; -import org.apache.pulsar.functions.worker.ConnectorsManager; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import org.apache.pulsar.functions.worker.FunctionsManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; import org.apache.pulsar.functions.worker.rest.api.SinksImpl; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; import org.mockito.MockedStatic; import org.mockito.Mockito; import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static org.apache.pulsar.functions.proto.Function.ProcessingGuarantees.ATLEAST_ONCE; -import static org.apache.pulsar.functions.source.TopicSchema.DEFAULT_SERDE; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.when; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertNotNull; -import static org.testng.Assert.assertTrue; -import static org.testng.Assert.fail; 
/** * Unit test of {@link SinksApiV3Resource}. */ -public class SinkApiV3ResourceTest { +public class SinkApiV3ResourceTest extends AbstractFunctionsResourceTest { - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; private static final String sink = "test-sink"; - private static final Map topicsToSerDeClassName = new HashMap<>(); - - static { - topicsToSerDeClassName.put("test_src", DEFAULT_SERDE); - } - - private static final String subscriptionName = "test-subscription"; - private static final String CASSANDRA_STRING_SINK = "org.apache.pulsar.io.cassandra.CassandraStringSink"; - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants mockedTenants; - private Namespaces mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; + private SinksImpl resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetaData; - private LeaderService mockedLeaderService; - private Packages mockedPackages; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - private static Map mockStaticContexts = new HashMap<>(); - - private static final String SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH = "pulsar-io-cassandra.nar.path"; - - public static File getPulsarIOCassandraNar() { - return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH) - , "pulsar-io-cassandra.nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_CASSANDRA_NAR_FILE_PATH + " system property")); - } - - private static final 
String SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH = "pulsar-io-twitter.nar.path"; - - public static File getPulsarIOTwitterNar() { - return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH) - , "pulsar-io-twitter.nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_TWITTER_NAR_FILE_PATH + " system property")); - } - - private static final String SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH = "pulsar-io-invalid.nar.path"; - - public static File getPulsarIOInvalidNar() { - return new File(Objects.requireNonNull(System.getProperty(SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH) - , "invalid nar file location must be specified with " - + SYSTEM_PROPERTY_NAME_INVALID_NAR_FILE_PATH + " system property")); - } - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = mock(FunctionRuntimeManager.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedTenantInfo = mock(TenantInfoImpl.class); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - this.mockedFunctions = mock(Functions.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedPackages = mock(Packages.class); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - 
when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - doAnswer(invocationOnMock -> { - Files.copy(getPulsarIOCassandraNar().toPath(), Paths.get(invocationOnMock.getArgument(1, String.class)), - StandardCopyOption.REPLACE_EXISTING); - return null; - }).when(mockedPackages).download(any(), any()); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); + @Override + protected void doSetup() { this.resource = spy(new SinksImpl(() -> mockedWorkerService)); - } - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); + @Override + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + if (method.getName().contains("Upload") || method.getName().contains("BKPackage")) { + 
workerConfig.setFunctionsWorkerEnablePackageManagement(false); } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); } - - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = - mockStaticContexts.computeIfAbsent(classStatic.getName(), name -> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); - } - - private void mockWorkerUtils() { - mockWorkerUtils(null); - } - - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); + @Override + protected Function.FunctionDetails.ComponentType getComponentType() { + return Function.FunctionDetails.ComponentType.SINK; } - private void mockInstanceUtils() { - mockStatic(InstanceUtils.class, ctx -> { - ctx.when(() -> InstanceUtils.calculateSubjectType(any())) - .thenReturn(FunctionDetails.ComponentType.SINK); - }); + @Override + protected File getDefaultNarFile() { + return getPulsarIOCassandraNar(); } - - // - // Register Functions - // - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Tenant is not provided") public void testRegisterSinkMissingTenant() { try { @@ -337,8 +177,8 @@ public void testRegisterSinkMissingPackage() { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink class UnknownClass must " - + "be in class path") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink class UnknownClass not " + + "found") public void testRegisterSinkWrongClassName() { mockInstanceUtils(); try { @@ -359,10 +199,8 @@ public void testRegisterSinkWrongClassName() { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink package does not have the" - + " correct format. 
Pulsar cannot determine if the package is a NAR package" - + " or JAR package. Sink classname is not provided and attempts to load it as a NAR package produced the " - + "following error.") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Sink package doesn't contain " + + "the META-INF/services/pulsar-io.yaml file.") public void testRegisterSinkMissingPackageDetails() { mockInstanceUtils(); try { @@ -722,30 +560,11 @@ public void testRegisterSinkSuccessWithTransformFunction() throws Exception { when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - doReturn(RecordFunction.class).when(mockedClassLoader).loadClass("RecordFunction"); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(FunctionCommon::createPkgTempFile).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getRawFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionClassParent(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - mockStatic(FunctionUtils.class, ctx -> { - ctx.when(() -> FunctionUtils.getFunctionClass(any())).thenReturn("RecordFunction"); - }); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("transform")).thenReturn(functionArchive); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); + registerBuiltinConnector("recordfunction", RecordFunction.class.getName()); + 
registerBuiltinFunction("transform", RecordFunction.class.getName()); SinkConfig sinkConfig = createDefaultSinkConfig(); + sinkConfig.setSinkType("builtin://recordfunction"); sinkConfig.setTransformFunction("builtin://transform"); sinkConfig.setTransformFunctionConfig("{\"dummy\": \"dummy\"}"); @@ -770,28 +589,7 @@ public void testRegisterSinkFailureWithInvalidTransformFunction() throws Excepti when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - doReturn(ExclamationFunction.class).when(mockedClassLoader).loadClass("ExclamationFunction"); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(FunctionCommon::createPkgTempFile).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getRawFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionClassParent(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - mockStatic(FunctionUtils.class, ctx -> { - ctx.when(() -> FunctionUtils.getFunctionClass(any())).thenReturn("ExclamationFunction"); - }); - - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("transform")).thenReturn(functionArchive); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); + registerBuiltinFunction("transform", getPulsarApiExamplesNar()); SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setTransformFunction("builtin://transform"); @@ -946,16 +744,18 @@ public void 
testUpdateSinkDifferentInputs() throws Exception { public void testUpdateSinkDifferentParallelism() throws Exception { mockWorkerUtils(); - testUpdateSinkMissingArguments( - tenant, - namespace, - sink, - null, - mockedFormData, - topicsToSerDeClassName, - CASSANDRA_STRING_SINK, - parallelism + 1, - null); + try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { + testUpdateSinkMissingArguments( + tenant, + namespace, + sink, + inputStream, + mockedFormData, + topicsToSerDeClassName, + CASSANDRA_STRING_SINK, + parallelism + 1, + null); + } } private void testUpdateSinkMissingArguments( @@ -1007,32 +807,6 @@ private void testUpdateSinkMissingArguments( } - private void mockFunctionCommon(String tenant, String namespace, String sink) throws IOException { - mockStatic(ConnectorUtils.class, ctx -> { - ctx.when(() -> ConnectorUtils.getIOSinkClass(any(NarClassLoader.class))) - .thenReturn(CASSANDRA_STRING_SINK); - }); - - mockStatic(ClassLoaderUtils.class, ctx -> { - }); - - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mock(NarClassLoader.class)); - ctx.when(() -> FunctionCommon - .convertProcessingGuarantee(eq(FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE))) - .thenReturn(ATLEAST_ONCE); - }); - - this.mockedFunctionMetaData = - FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); - when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(sink))).thenReturn(mockedFunctionMetaData); - - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); - } - private void updateDefaultSink() throws Exception { 
updateDefaultSinkWithPackageUrl(null); } @@ -1040,25 +814,6 @@ private void updateDefaultSink() throws Exception { private void updateDefaultSinkWithPackageUrl(String packageUrl) throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); - mockStatic(ConnectorUtils.class, ctx -> { - ctx.when(() -> ConnectorUtils.getIOSinkClass(any(NarClassLoader.class))) - .thenReturn(CASSANDRA_STRING_SINK); - }); - - mockStatic(ClassLoaderUtils.class, ctx -> { - }); - - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mock(NarClassLoader.class)); - ctx.when(() -> FunctionCommon - .convertProcessingGuarantee(eq(FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE))) - .thenReturn(ATLEAST_ONCE); - }); - - this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); @@ -1091,7 +846,6 @@ public void testUpdateNotExistedSink() throws Exception { public void testUpdateSinkUploadFailure() throws Exception { try { mockWorkerUtils(ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); ctx.when(() -> WorkerUtils.uploadFileToBookkeeper( anyString(), any(File.class), @@ -1127,24 +881,6 @@ public void testUpdateSinkWithUrl() throws Exception { when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); - mockStatic(ConnectorUtils.class, ctx -> { - ctx.when(() -> ConnectorUtils.getIOSinkClass(any())) - .thenReturn(CASSANDRA_STRING_SINK); - }); - - mockStatic(ClassLoaderUtils.class, ctx -> { - }); - - mockStatic(FunctionCommon.class, ctx -> { - 
ctx.when(() -> FunctionCommon.extractFileFromPkgURL(any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mock(NarClassLoader.class)); - ctx.when(() -> FunctionCommon - .convertProcessingGuarantee(eq(FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE))) - .thenReturn(ATLEAST_ONCE); - }); - this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); @@ -1219,35 +955,17 @@ public void testUpdateSinkDifferentTransformFunction() throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setTransformFunction("builtin://transform"); - sinkConfig.setTransformFunctionClassName("DummyFunction"); sinkConfig.setTransformFunctionConfig("{\"dummy\": \"dummy\"}"); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - doReturn(RecordFunction.class).when(mockedClassLoader).loadClass("DummyFunction"); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(FunctionCommon::createPkgTempFile).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getRawFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionTypes(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getFunctionClassParent(any(), anyBoolean())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); + registerBuiltinFunction("transform", RecordFunction.class.getName()); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), 
eq(sink))).thenReturn(true); this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(sink))).thenReturn(mockedFunctionMetaData); - when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); - FunctionsManager mockedFunctionsManager = mock(FunctionsManager.class); - FunctionArchive functionArchive = FunctionArchive.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedFunctionsManager.getFunction("transform")).thenReturn(functionArchive); - - when(mockedWorkerService.getFunctionsManager()).thenReturn(mockedFunctionsManager); - - try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { resource.updateSink( tenant, @@ -1737,6 +1455,14 @@ private SinkConfig createDefaultSinkConfig() { return sinkConfig; } + private void mockFunctionCommon(String tenant, String namespace, String sink) throws IOException { + this.mockedFunctionMetaData = + Function.FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); + when(mockedManager.getFunctionMetaData(eq(tenant), eq(namespace), eq(sink))).thenReturn(mockedFunctionMetaData); + + when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(true); + } + private FunctionDetails createDefaultFunctionDetails() throws IOException { return SinkConfigUtils.convert(createDefaultSinkConfig(), new SinkConfigUtils.ExtractedSinkDetails(null, null, null)); @@ -1760,21 +1486,7 @@ public void testRegisterSinkSuccessK8sNoUpload() throws Exception { }); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> 
FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - ConnectorsManager mockedConnManager = mock(ConnectorsManager.class); - Connector connector = Connector.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedConnManager.getConnector("cassandra")).thenReturn(connector); - when(mockedConnManager.getSinkArchive(any())).thenReturn(getPulsarIOCassandraNar().toPath()); - when(mockedWorkerService.getConnectorsManager()).thenReturn(mockedConnManager); + registerBuiltinConnector("cassandra", getPulsarIOCassandraNar()); when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); @@ -1782,17 +1494,15 @@ public void testRegisterSinkSuccessK8sNoUpload() throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setArchive("builtin://cassandra"); - try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { - resource.registerSink( - tenant, - namespace, - sink, - inputStream, - mockedFormData, - null, - sinkConfig, - null); - } + resource.registerSink( + tenant, + namespace, + sink, + null, + mockedFormData, + null, + sinkConfig, + null); } /* @@ -1814,23 +1524,7 @@ public void testRegisterSinkSuccessK8sWithUpload() throws Exception { }); - NarClassLoader mockedClassLoader = mock(NarClassLoader.class); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.getSinkType(any())).thenReturn(String.class); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.isFunctionCodeBuiltin(any())).thenReturn(true); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())).thenReturn(mockedClassLoader); - }); - - ConnectorsManager mockedConnManager = mock(ConnectorsManager.class); - Connector 
connector = Connector.builder() - .classLoader(mockedClassLoader) - .build(); - when(mockedConnManager.getConnector("cassandra")).thenReturn(connector); - when(mockedConnManager.getSinkArchive(any())).thenReturn(getPulsarIOCassandraNar().toPath()); - - when(mockedWorkerService.getConnectorsManager()).thenReturn(mockedConnManager); - + registerBuiltinConnector("cassandra", getPulsarIOCassandraNar()); when(mockedRuntimeFactory.externallyManaged()).thenReturn(true); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(sink))).thenReturn(false); @@ -1838,21 +1532,19 @@ public void testRegisterSinkSuccessK8sWithUpload() throws Exception { SinkConfig sinkConfig = createDefaultSinkConfig(); sinkConfig.setArchive("builtin://cassandra"); - try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { - try { - resource.registerSink( - tenant, - namespace, - sink, - inputStream, - mockedFormData, - null, - sinkConfig, - null); - Assert.fail(); - } catch (RuntimeException e) { - Assert.assertEquals(e.getMessage(), injectedErrMsg); - } + try { + resource.registerSink( + tenant, + namespace, + sink, + null, + mockedFormData, + null, + sinkConfig, + null); + Assert.fail(); + } catch (RuntimeException e) { + Assert.assertEquals(e.getMessage(), injectedErrMsg); } } @@ -1865,11 +1557,6 @@ public void testUpdateSinkWithNoChange() throws IOException { mockStatic(SinkConfigUtils.class, ctx -> { ctx.when(() -> SinkConfigUtils.convertFromDetails(any())).thenReturn(sinkConfig); - ctx.when(() -> SinkConfigUtils.convert(any(), any())).thenCallRealMethod(); - ctx.when(() -> SinkConfigUtils.validateUpdate(any(), any())).thenCallRealMethod(); - ctx.when(() -> SinkConfigUtils.clone(any())).thenCallRealMethod(); - ctx.when(() -> SinkConfigUtils.collectAllInputTopics(any())).thenCallRealMethod(); - ctx.when(() -> SinkConfigUtils.validateAndExtractDetails(any(),any(),any(),anyBoolean())).thenCallRealMethod(); }); mockFunctionCommon(sinkConfig.getTenant(), 
sinkConfig.getNamespace(), sinkConfig.getName()); @@ -1912,15 +1599,17 @@ public void testUpdateSinkWithNoChange() throws IOException { // no changes but set the auth-update flag to true, should not fail UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); updateOptions.setUpdateAuthData(true); - resource.updateSink( - sinkConfig.getTenant(), - sinkConfig.getNamespace(), - sinkConfig.getName(), - null, - mockedFormData, - null, - sinkConfig, - null, - updateOptions); + try (FileInputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { + resource.updateSink( + sinkConfig.getTenant(), + sinkConfig.getNamespace(), + sinkConfig.getName(), + inputStream, + mockedFormData, + null, + sinkConfig, + null, + updateOptions); + } } } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java index eabf954cc77b1..f02acbd3663bf 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/SourceApiV3ResourceTest.java @@ -18,17 +18,10 @@ */ package org.apache.pulsar.functions.worker.rest.api.v3; -import static org.apache.pulsar.functions.worker.rest.api.v3.SinkApiV3ResourceTest.getPulsarIOCassandraNar; -import static org.apache.pulsar.functions.worker.rest.api.v3.SinkApiV3ResourceTest.getPulsarIOInvalidNar; -import static org.apache.pulsar.functions.worker.rest.api.v3.SinkApiV3ResourceTest.getPulsarIOTwitterNar; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; import static 
org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; @@ -41,31 +34,18 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.nio.file.StandardCopyOption; -import java.util.HashMap; +import java.lang.reflect.Method; import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.function.Consumer; import javax.ws.rs.core.Response; import org.apache.distributedlog.api.namespace.Namespace; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.config.Configurator; import org.apache.pulsar.broker.authentication.AuthenticationParameters; -import org.apache.pulsar.client.admin.Functions; -import org.apache.pulsar.client.admin.Namespaces; -import org.apache.pulsar.client.admin.Packages; -import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; -import org.apache.pulsar.client.admin.Tenants; import org.apache.pulsar.common.functions.UpdateOptionsImpl; import org.apache.pulsar.common.functions.Utils; import org.apache.pulsar.common.io.SourceConfig; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.apache.pulsar.common.nar.NarClassLoaderBuilder; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.api.utils.IdentityFunction; @@ -75,159 +55,43 @@ import org.apache.pulsar.functions.proto.Function.ProcessingGuarantees; import org.apache.pulsar.functions.proto.Function.SinkSpec; import org.apache.pulsar.functions.proto.Function.SourceSpec; -import org.apache.pulsar.functions.runtime.RuntimeFactory; import org.apache.pulsar.functions.source.TopicSchema; -import 
org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.utils.SourceConfigUtils; import org.apache.pulsar.functions.utils.io.ConnectorUtils; -import org.apache.pulsar.functions.worker.FunctionMetaDataManager; -import org.apache.pulsar.functions.worker.FunctionRuntimeManager; -import org.apache.pulsar.functions.worker.LeaderService; -import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerUtils; -import org.apache.pulsar.functions.worker.rest.api.PulsarFunctionTestTemporaryDirectory; import org.apache.pulsar.functions.worker.rest.api.SourcesImpl; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; import org.mockito.MockedStatic; import org.mockito.Mockito; -import org.testng.annotations.AfterClass; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; /** * Unit test of {@link SourcesApiV3Resource}. 
*/ -public class SourceApiV3ResourceTest { +public class SourceApiV3ResourceTest extends AbstractFunctionsResourceTest { - private static final String tenant = "test-tenant"; - private static final String namespace = "test-namespace"; private static final String source = "test-source"; private static final String outputTopic = "test-output-topic"; private static final String outputSerdeClassName = TopicSchema.DEFAULT_SERDE; private static final String TWITTER_FIRE_HOSE = "org.apache.pulsar.io.twitter.TwitterFireHose"; - private static final int parallelism = 1; - - private PulsarWorkerService mockedWorkerService; - private PulsarAdmin mockedPulsarAdmin; - private Tenants mockedTenants; - private Namespaces mockedNamespaces; - private Functions mockedFunctions; - private TenantInfoImpl mockedTenantInfo; - private List namespaceList = new LinkedList<>(); - private FunctionMetaDataManager mockedManager; - private FunctionRuntimeManager mockedFunctionRunTimeManager; - private RuntimeFactory mockedRuntimeFactory; - private Namespace mockedNamespace; private SourcesImpl resource; - private InputStream mockedInputStream; - private FormDataContentDisposition mockedFormData; - private FunctionMetaData mockedFunctionMetaData; - private LeaderService mockedLeaderService; - private Packages mockedPackages; - private PulsarFunctionTestTemporaryDirectory tempDirectory; - - private static NarClassLoader narClassLoader; - private static Map mockStaticContexts = new HashMap<>(); - - @BeforeClass - public void setupNarClassLoader() throws IOException { - narClassLoader = NarClassLoaderBuilder.builder().narFile(getPulsarIOTwitterNar()).build(); - } - - @AfterClass(alwaysRun = true) - public void cleanupNarClassLoader() throws IOException { - if (narClassLoader != null) { - narClassLoader.close(); - narClassLoader = null; - } - } - - @BeforeMethod - public void setup() throws Exception { - this.mockedManager = mock(FunctionMetaDataManager.class); - this.mockedFunctionRunTimeManager = 
mock(FunctionRuntimeManager.class); - this.mockedRuntimeFactory = mock(RuntimeFactory.class); - this.mockedInputStream = mock(InputStream.class); - this.mockedNamespace = mock(Namespace.class); - this.mockedFormData = mock(FormDataContentDisposition.class); - when(mockedFormData.getFileName()).thenReturn("test"); - this.mockedTenantInfo = mock(TenantInfoImpl.class); - this.mockedPulsarAdmin = mock(PulsarAdmin.class); - this.mockedTenants = mock(Tenants.class); - this.mockedNamespaces = mock(Namespaces.class); - this.mockedFunctions = mock(Functions.class); - this.mockedLeaderService = mock(LeaderService.class); - this.mockedPackages = mock(Packages.class); - namespaceList.add(tenant + "/" + namespace); - - this.mockedWorkerService = mock(PulsarWorkerService.class); - when(mockedWorkerService.getFunctionMetaDataManager()).thenReturn(mockedManager); - when(mockedWorkerService.getLeaderService()).thenReturn(mockedLeaderService); - when(mockedWorkerService.getFunctionRuntimeManager()).thenReturn(mockedFunctionRunTimeManager); - when(mockedFunctionRunTimeManager.getRuntimeFactory()).thenReturn(mockedRuntimeFactory); - when(mockedWorkerService.getDlogNamespace()).thenReturn(mockedNamespace); - when(mockedWorkerService.isInitialized()).thenReturn(true); - when(mockedWorkerService.getBrokerAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedWorkerService.getFunctionAdmin()).thenReturn(mockedPulsarAdmin); - when(mockedPulsarAdmin.tenants()).thenReturn(mockedTenants); - when(mockedPulsarAdmin.namespaces()).thenReturn(mockedNamespaces); - when(mockedPulsarAdmin.functions()).thenReturn(mockedFunctions); - when(mockedPulsarAdmin.packages()).thenReturn(mockedPackages); - when(mockedTenants.getTenantInfo(any())).thenReturn(mockedTenantInfo); - when(mockedNamespaces.getNamespaces(any())).thenReturn(namespaceList); - when(mockedLeaderService.isLeader()).thenReturn(true); - doAnswer(invocationOnMock -> { - Files.copy(getPulsarIOTwitterNar().toPath(), 
Paths.get(invocationOnMock.getArgument(1, String.class)), - StandardCopyOption.REPLACE_EXISTING); - return null; - }).when(mockedPackages).download(any(), any()); - - // worker config - WorkerConfig workerConfig = new WorkerConfig() - .setWorkerId("test") - .setWorkerPort(8080) - .setFunctionMetadataTopicName("pulsar/functions") - .setNumFunctionPackageReplicas(3) - .setPulsarServiceUrl("pulsar://localhost:6650/"); - tempDirectory = PulsarFunctionTestTemporaryDirectory.create(getClass().getSimpleName()); - tempDirectory.useTemporaryDirectoriesForWorkerConfig(workerConfig); - when(mockedWorkerService.getWorkerConfig()).thenReturn(workerConfig); + @Override + protected void doSetup() { this.resource = spy(new SourcesImpl(() -> mockedWorkerService)); } - private void mockStatic(Class classStatic, Consumer> consumer) { - final MockedStatic mockedStatic = - mockStaticContexts.computeIfAbsent(classStatic.getName(), name -> Mockito.mockStatic(classStatic)); - consumer.accept(mockedStatic); - } - - @AfterMethod(alwaysRun = true) - public void cleanup() { - if (tempDirectory != null) { - tempDirectory.delete(); + @Override + protected void customizeWorkerConfig(WorkerConfig workerConfig, Method method) { + if (method.getName().endsWith("UploadFailure") || method.getName().contains("BKPackage")) { + workerConfig.setFunctionsWorkerEnablePackageManagement(false); } - mockStaticContexts.values().forEach(MockedStatic::close); - mockStaticContexts.clear(); } - private void mockWorkerUtils() { - mockStatic(WorkerUtils.class, - ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - }); - } - - private void mockWorkerUtils(Consumer> consumer) { - mockStatic(WorkerUtils.class, ctx -> { - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); - if (consumer != null) { - consumer.accept(ctx); - } - }); + @Override + protected FunctionDetails.ComponentType getComponentType() { + return FunctionDetails.ComponentType.SOURCE; } // @@ -297,8 
+161,8 @@ public void testRegisterSourceMissingSourceName() { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source class UnknownClass must" - + " be in class path") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source class UnknownClass not " + + "found in class loader") public void testRegisterSourceWrongClassName() { try { testRegisterSourceMissingArguments( @@ -361,10 +225,8 @@ public void testRegisterSourceMissingPackageDetails() throws IOException { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source package does not have the" - + " correct format. Pulsar cannot determine if the package is a NAR package" - + " or JAR package. Source classname is not provided and attempts to load it as a NAR package " - + "produced the following error.") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source package doesn't contain" + + " the META-INF/services/pulsar-io.yaml file.") public void testRegisterSourceMissingPackageDetailsAndClassname() { try { testRegisterSourceMissingArguments( @@ -385,8 +247,8 @@ public void testRegisterSourceMissingPackageDetailsAndClassname() { } } - @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Failed to extract source class" - + " from archive") + @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source package doesn't contain" + + " the META-INF/services/pulsar-io.yaml file.") public void testRegisterSourceInvalidJarWithNoSource() throws IOException { try (InputStream inputStream = new FileInputStream(getPulsarIOInvalidNar())) { testRegisterSourceMissingArguments( @@ -524,7 +386,7 @@ public void testUpdateMissingSinkConfig() { } private void registerDefaultSource() throws IOException { - registerDefaultSourceWithPackageUrl("source://public/default/test@v1"); + 
registerDefaultSourceWithPackageUrl(getPulsarIOTwitterNar().toURI().toString()); } private void registerDefaultSourceWithPackageUrl(String packageUrl) throws IOException { @@ -565,8 +427,6 @@ public void testRegisterSourceUploadFailure() throws Exception { any(File.class), any(Namespace.class))) .thenThrow(new IOException("upload failure")); - - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); }); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(false); @@ -593,7 +453,7 @@ public void testRegisterSourceSuccess() throws Exception { @Test(timeOut = 20000) public void testRegisterSourceSuccessWithPackageName() throws IOException { - registerDefaultSourceWithPackageUrl("source://public/default/test@v1"); + registerDefaultSource(); } @Test(timeOut = 20000) @@ -621,14 +481,7 @@ public void testRegisterSourceConflictingFields() throws Exception { when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(true); when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); - SourceConfig sourceConfig = new SourceConfig(); - sourceConfig.setTenant(tenant); - sourceConfig.setNamespace(namespace); - sourceConfig.setName(source); - sourceConfig.setClassName(TWITTER_FIRE_HOSE); - sourceConfig.setParallelism(parallelism); - sourceConfig.setTopicName(outputTopic); - sourceConfig.setSerdeClassName(outputSerdeClassName); + SourceConfig sourceConfig = createDefaultSourceConfig(); try (InputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { resource.registerSource( actualTenant, @@ -815,17 +668,19 @@ public void testUpdateSourceChangedParallelism() throws Exception { try { mockWorkerUtils(); - testUpdateSourceMissingArguments( - tenant, - namespace, - source, - null, - mockedFormData, - outputTopic, - outputSerdeClassName, - TWITTER_FIRE_HOSE, - parallelism + 1, - null); + try(FileInputStream inputStream = new 
FileInputStream(getPulsarIOTwitterNar())) { + testUpdateSourceMissingArguments( + tenant, + namespace, + source, + inputStream, + mockedFormData, + outputTopic, + outputSerdeClassName, + TWITTER_FIRE_HOSE, + parallelism + 1, + null); + } } catch (RestException re) { assertEquals(re.getResponse().getStatusInfo(), Response.Status.BAD_REQUEST); throw re; @@ -836,31 +691,29 @@ public void testUpdateSourceChangedParallelism() throws Exception { public void testUpdateSourceChangedTopic() throws Exception { mockWorkerUtils(); - testUpdateSourceMissingArguments( - tenant, - namespace, - source, - null, - mockedFormData, - "DifferentTopic", - outputSerdeClassName, - TWITTER_FIRE_HOSE, - parallelism, - null); + try(FileInputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { + testUpdateSourceMissingArguments( + tenant, + namespace, + source, + inputStream, + mockedFormData, + "DifferentTopic", + outputSerdeClassName, + TWITTER_FIRE_HOSE, + parallelism, + null); + } } @Test - public void testUpdateSourceWithNoChange() { + public void testUpdateSourceWithNoChange() throws IOException { mockWorkerUtils(); // No change on config, SourceConfig sourceConfig = createDefaultSourceConfig(); mockStatic(SourceConfigUtils.class, ctx -> { ctx.when(() -> SourceConfigUtils.convertFromDetails(any())).thenReturn(sourceConfig); - ctx.when(() -> SourceConfigUtils.convert(any(), any())).thenCallRealMethod(); - ctx.when(() -> SourceConfigUtils.validateUpdate(any(), any())).thenCallRealMethod(); - ctx.when(() -> SourceConfigUtils.clone(any())).thenCallRealMethod(); - ctx.when(() -> SourceConfigUtils.validateAndExtractDetails(any(),any(),anyBoolean())).thenCallRealMethod(); }); mockFunctionCommon(sourceConfig.getTenant(), sourceConfig.getNamespace(), sourceConfig.getName()); @@ -903,16 +756,18 @@ public void testUpdateSourceWithNoChange() { // no changes but set the auth-update flag to true, should not fail UpdateOptionsImpl updateOptions = new UpdateOptionsImpl(); 
updateOptions.setUpdateAuthData(true); - resource.updateSource( - sourceConfig.getTenant(), - sourceConfig.getNamespace(), - sourceConfig.getName(), - null, - mockedFormData, - null, - sourceConfig, - null, - updateOptions); + try (InputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { + resource.updateSource( + sourceConfig.getTenant(), + sourceConfig.getNamespace(), + sourceConfig.getName(), + inputStream, + mockedFormData, + null, + sourceConfig, + null, + updateOptions); + } } @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source parallelism must be a " @@ -997,14 +852,6 @@ private void mockFunctionCommon(String tenant, String namespace, String function }); mockStatic(ClassLoaderUtils.class, c -> { }); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSourceType(argThat(clazz -> clazz.getName().equals(TWITTER_FIRE_HOSE)))) - .thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())) - .thenReturn(narClassLoader); - }); this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); @@ -1014,49 +861,25 @@ private void mockFunctionCommon(String tenant, String namespace, String function } private void updateDefaultSource() throws Exception { - updateDefaultSourceWithPackageUrl(null); + updateDefaultSourceWithPackageUrl(getPulsarIOTwitterNar().toURI().toString()); } private void updateDefaultSourceWithPackageUrl(String packageUrl) throws Exception { - SourceConfig sourceConfig = new SourceConfig(); - sourceConfig.setTenant(tenant); - sourceConfig.setNamespace(namespace); - sourceConfig.setName(source); - sourceConfig.setClassName(TWITTER_FIRE_HOSE); - sourceConfig.setParallelism(parallelism); - 
sourceConfig.setTopicName(outputTopic); - sourceConfig.setSerdeClassName(outputSerdeClassName); - - mockStatic(ConnectorUtils.class, c -> { - }); - - mockStatic(ClassLoaderUtils.class, c -> { - }); - - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.createPkgTempFile()).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSourceType(argThat(clazz -> clazz.getName().equals(TWITTER_FIRE_HOSE)))) - .thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())) - .thenReturn(narClassLoader); - }); + SourceConfig sourceConfig = createDefaultSourceConfig(); this.mockedFunctionMetaData = FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); - try (InputStream inputStream = new FileInputStream(getPulsarIOCassandraNar())) { - resource.updateSource( - tenant, - namespace, - source, - inputStream, - mockedFormData, - packageUrl, - sourceConfig, - null, null); - } + resource.updateSource( + tenant, + namespace, + source, + null, + mockedFormData, + packageUrl, + sourceConfig, + null, null); } @Test(expectedExceptions = RestException.class, expectedExceptionsMessageRegExp = "Source test-source doesn't " + @@ -1079,11 +902,25 @@ public void testUpdateSourceUploadFailure() throws Exception { anyString(), any(File.class), any(Namespace.class))).thenThrow(new IOException("upload failure")); - ctx.when(() -> WorkerUtils.dumpToTmpFile(any())).thenCallRealMethod(); }); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(true); - updateDefaultSource(); + SourceConfig sourceConfig = createDefaultSourceConfig(); + this.mockedFunctionMetaData = + FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); + 
when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); + + try(InputStream inputStream = new FileInputStream(getPulsarIOTwitterNar())) { + resource.updateSource( + tenant, + namespace, + source, + inputStream, + mockedFormData, + null, + sourceConfig, + null, null); + } } catch (RestException re) { assertEquals(re.getResponse().getStatusInfo(), Response.Status.INTERNAL_SERVER_ERROR); throw re; @@ -1103,16 +940,9 @@ public void testUpdateSourceSuccess() throws Exception { public void testUpdateSourceWithUrl() throws Exception { Configurator.setRootLevel(Level.DEBUG); - String filePackageUrl = getPulsarIOCassandraNar().toURI().toString(); + String filePackageUrl = getPulsarIOTwitterNar().toURI().toString(); - SourceConfig sourceConfig = new SourceConfig(); - sourceConfig.setTopicName(outputTopic); - sourceConfig.setSerdeClassName(outputSerdeClassName); - sourceConfig.setTenant(tenant); - sourceConfig.setNamespace(namespace); - sourceConfig.setName(source); - sourceConfig.setClassName(TWITTER_FIRE_HOSE); - sourceConfig.setParallelism(parallelism); + SourceConfig sourceConfig = createDefaultSourceConfig(); when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(source))).thenReturn(true); mockStatic(ConnectorUtils.class, c -> { @@ -1120,15 +950,6 @@ public void testUpdateSourceWithUrl() throws Exception { mockStatic(ClassLoaderUtils.class, c -> { }); - mockStatic(FunctionCommon.class, ctx -> { - ctx.when(() -> FunctionCommon.extractFileFromPkgURL(any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getClassLoaderFromPackage(any(), any(), any(), any())).thenCallRealMethod(); - ctx.when(() -> FunctionCommon.getSourceType(argThat(clazz -> clazz.getName().equals(TWITTER_FIRE_HOSE)))) - .thenReturn(String.class); - ctx.when(() -> FunctionCommon.extractNarClassLoader(any(), any())) - .thenReturn(narClassLoader); - }); - this.mockedFunctionMetaData = 
FunctionMetaData.newBuilder().setFunctionDetails(createDefaultFunctionDetails()).build(); when(mockedManager.getFunctionMetaData(any(), any(), any())).thenReturn(mockedFunctionMetaData); diff --git a/pulsar-io/aerospike/pom.xml b/pulsar-io/aerospike/pom.xml index 7f604add58819..cf648b8dfb6ea 100644 --- a/pulsar-io/aerospike/pom.xml +++ b/pulsar-io/aerospike/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-aerospike Pulsar IO :: Aerospike - - ${project.groupId} pulsar-io-core ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.aerospike aerospike-client-bc @@ -63,9 +57,7 @@ org.bouncycastle bcpkix-jdk18on - - @@ -91,6 +83,4 @@ - - diff --git a/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml b/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/aerospike/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-io/alluxio/pom.xml b/pulsar-io/alluxio/pom.xml index 878194d8bd2ba..6101573b33f3d 100644 --- a/pulsar-io/alluxio/pom.xml +++ b/pulsar-io/alluxio/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.1.0-SNAPSHOT - - - - 2.7.3 - 4.1.11 - 1.37.0 - - - pulsar-io-alluxio - Pulsar IO :: Alluxio - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - ${project.groupId} - pulsar-client-original - ${project.version} - test - - - - org.alluxio - alluxio-core-client-fs - ${alluxio.version} - - - grpc-netty - io.grpc - - - - - - org.alluxio - alluxio-minicluster - ${alluxio.version} - test - - - org.glassfish - javax.el - - - grpc-netty - io.grpc - - - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - com.google.guava - guava - - - - - io.grpc - grpc-netty - ${grpc.version} - - - - 
io.dropwizard.metrics - metrics-jvm - ${metrics.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - + + 4.0.0 + + pulsar-io + io.streamnative + 3.1.0-SNAPSHOT + + + 2.7.3 + 4.1.11 + 1.37.0 + + pulsar-io-alluxio + Pulsar IO :: Alluxio + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + test + + + org.alluxio + alluxio-core-client-fs + ${alluxio.version} + + + grpc-netty + io.grpc + + + + + org.alluxio + alluxio-minicluster + ${alluxio.version} + test + + + org.glassfish + javax.el + + + grpc-netty + io.grpc + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + com.google.guava + guava + + + + io.grpc + grpc-netty + ${grpc.version} + + + io.dropwizard.metrics + metrics-jvm + ${metrics.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/aws/pom.xml b/pulsar-io/aws/pom.xml index 74e74f6e25851..1bb9efee198ae 100644 --- a/pulsar-io/aws/pom.xml +++ b/pulsar-io/aws/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-aws Pulsar IO :: IO AWS - ${project.groupId} pulsar-io-core ${project.version} - com.google.code.gson gson - com.amazonaws aws-java-sdk-sts - software.amazon.awssdk sts 2.10.56 - - + - diff --git a/pulsar-io/aws/src/main/resources/findbugsExclude.xml b/pulsar-io/aws/src/main/resources/findbugsExclude.xml index ddde8120ba518..47c3d73af5f06 100644 --- a/pulsar-io/aws/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/aws/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - \ No newline at end of file + diff --git a/pulsar-io/batch-data-generator/pom.xml b/pulsar-io/batch-data-generator/pom.xml index 1a6bffd531ffb..3ab06875d0c17 100644 --- a/pulsar-io/batch-data-generator/pom.xml +++ b/pulsar-io/batch-data-generator/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT - - - pulsar-io-batch-data-generator 
- Pulsar IO :: Batch Data Generator - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - ${project.groupId} - pulsar-io-batch-discovery-triggerers - ${project.version} - - - - org.springframework - spring-context - ${spring.version} - - - - io.codearte.jfairy - jfairy - 0.5.9 - - - - org.apache.avro - avro - ${avro.version} - - - - ${project.groupId} - pulsar-functions-local-runner-original - ${project.version} - test - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.1.0-SNAPSHOT + + pulsar-io-batch-data-generator + Pulsar IO :: Batch Data Generator + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-io-batch-discovery-triggerers + ${project.version} + + + org.springframework + spring-context + ${spring.version} + + + io.codearte.jfairy + jfairy + 0.5.9 + + + org.apache.avro + avro + ${avro.version} + + + ${project.groupId} + pulsar-functions-local-runner-original + ${project.version} + test + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + diff --git a/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml b/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml index 0b1652ef62881..1d0d64fba7e67 100644 --- a/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/batch-data-generator/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-batch-discovery-triggerers Pulsar IO :: Batch Discovery Triggerers - ${project.groupId} 
pulsar-io-core ${project.version} - com.cronutils cron-utils ${cron-utils.version} - org.springframework spring-context ${spring.version} - - @@ -73,5 +66,4 @@ - diff --git a/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml b/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/batch-discovery-triggerers/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT - - 4.0.0 - - pulsar-io-canal - Pulsar IO :: Canal - - - 1.1.5 - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - com.alibaba - fastjson - 1.2.83 - - - - org.springframework - spring-core - ${spring.version} - - - org.springframework - spring-aop - ${spring.version} - - - org.springframework - spring-context - ${spring.version} - - - org.springframework - spring-jdbc - ${spring.version} - - - org.springframework - spring-orm - ${spring.version} - - - - com.alibaba.otter - canal.protocol - ${canal.version} - - - ch.qos.logback - * - - - - - com.alibaba.otter - canal.client - ${canal.version} - - - ch.qos.logback - * - - - org.springframework - * - - - - - - org.apache.logging.log4j - log4j-core - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - - \ No newline at end of file + + + io.streamnative + pulsar-io + 3.1.0-SNAPSHOT + + 4.0.0 + pulsar-io-canal + Pulsar IO :: Canal + + 1.1.5 + + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + com.alibaba + fastjson + 1.2.83 + + + org.springframework + spring-core + ${spring.version} 
+ + + org.springframework + spring-aop + ${spring.version} + + + org.springframework + spring-context + ${spring.version} + + + org.springframework + spring-jdbc + ${spring.version} + + + org.springframework + spring-orm + ${spring.version} + + + com.alibaba.otter + canal.protocol + ${canal.version} + + + ch.qos.logback + * + + + + + com.alibaba.otter + canal.client + ${canal.version} + + + ch.qos.logback + * + + + org.springframework + * + + + + + org.apache.logging.log4j + log4j-core + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + + diff --git a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java index 06c8788d5aea1..7d0cd0305a49e 100644 --- a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java +++ b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalAbstractSource.java @@ -57,7 +57,7 @@ public abstract class CanalAbstractSource extends PushSource { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - canalSourceConfig = CanalSourceConfig.load(config); + canalSourceConfig = CanalSourceConfig.load(config, sourceContext); if (canalSourceConfig.getCluster()) { connector = CanalConnectors.newClusterConnector(canalSourceConfig.getZkServers(), canalSourceConfig.getDestination(), canalSourceConfig.getUsername(), diff --git a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java index a0408e60e5f76..5a754988ffdc1 100644 --- a/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java +++ b/pulsar-io/canal/src/main/java/org/apache/pulsar/io/canal/CanalSourceConfig.java @@ -26,6 +26,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import 
org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @@ -86,8 +88,7 @@ public static CanalSourceConfig load(String yamlFile) throws IOException { } - public static CanalSourceConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), CanalSourceConfig.class); + public static CanalSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, CanalSourceConfig.class, sourceContext); } } diff --git a/pulsar-io/cassandra/pom.xml b/pulsar-io/cassandra/pom.xml index 3859bd1029e86..8e016fe895860 100644 --- a/pulsar-io/cassandra/pom.xml +++ b/pulsar-io/cassandra/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-cassandra Pulsar IO :: Cassandra - - ${project.groupId} pulsar-io-core ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.datastax.cassandra cassandra-driver-core - - diff --git a/pulsar-io/common/pom.xml b/pulsar-io/common/pom.xml index 14a134bc42858..7da845df776ca 100644 --- a/pulsar-io/common/pom.xml +++ b/pulsar-io/common/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT - - - pulsar-io-common - Pulsar IO :: IO Common - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - com.fasterxml.jackson.core - jackson-databind - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.1.0-SNAPSHOT + + pulsar-io-common + Pulsar IO :: IO Common + + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + jackson-databind + + + + + + com.github.spotbugs + spotbugs-maven-plugin 
+ ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + diff --git a/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java b/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java index d15986a897caa..69d981bf68728 100644 --- a/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java +++ b/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java @@ -77,13 +77,14 @@ private static T loadWithSecrets(Map map, Class clazz, } } configs.computeIfAbsent(field.getName(), key -> { - if (fieldDoc.required()) { - throw new IllegalArgumentException(field.getName() + " cannot be null"); - } + // Use default value if it is not null before checking required String value = fieldDoc.defaultValue(); if (value != null && !value.isEmpty()) { return value; } + if (fieldDoc.required()) { + throw new IllegalArgumentException(field.getName() + " cannot be null"); + } return null; }); } diff --git a/pulsar-io/common/src/main/resources/findbugsExclude.xml b/pulsar-io/common/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/common/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/common/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-core Pulsar IO :: IO - ${project.groupId} @@ -37,7 +35,6 @@ ${project.version} - @@ -59,5 +56,4 @@ - diff --git a/pulsar-io/core/src/main/resources/findbugsExclude.xml b/pulsar-io/core/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/core/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/core/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT - - - pulsar-io-data-generator - Pulsar IO :: Data Generator - - - - - ${project.groupId} - 
pulsar-io-core - ${project.version} - - - - ${project.groupId} - pulsar-config-validation - ${project.version} - - - - io.codearte.jfairy - jfairy - 0.5.9 - - - - org.apache.avro - avro - ${avro.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.1.0-SNAPSHOT + + pulsar-io-data-generator + Pulsar IO :: Data Generator + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-config-validation + ${project.version} + + + io.codearte.jfairy + jfairy + 0.5.9 + + + org.apache.avro + avro + ${avro.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/debezium/core/pom.xml b/pulsar-io/debezium/core/pom.xml index 2e5a6f5c24da9..a4a1f36d1e985 100644 --- a/pulsar-io/debezium/core/pom.xml +++ b/pulsar-io/debezium/core/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.1.0-SNAPSHOT - pulsar-io-debezium-core Pulsar IO :: Debezium :: Core - - ${project.groupId} pulsar-io-core ${project.version} provided - io.debezium debezium-core ${debezium.version} - org.apache.commons commons-lang3 - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-io-kafka-connect-adaptor ${project.version} - org.apache.kafka connect-runtime @@ -71,23 +63,24 @@ org.apache.kafka kafka-log4j-appender + + jose4j + org.bitbucket.b_c + - ${project.groupId} pulsar-broker ${project.version} test - ${project.groupId} testmocks ${project.version} test - ${project.groupId} pulsar-broker @@ -95,21 +88,17 @@ test test-jar - io.debezium debezium-connector-mysql ${debezium.version} test - ${project.groupId} pulsar-client-original ${project.version} test - - diff --git a/pulsar-io/debezium/mongodb/pom.xml b/pulsar-io/debezium/mongodb/pom.xml index ad57e0de02b77..f187d71bec204 100644 --- a/pulsar-io/debezium/mongodb/pom.xml +++ b/pulsar-io/debezium/mongodb/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar 
- pulsar-io-debezium - 3.1.0-SNAPSHOT - - - pulsar-io-debezium-mongodb - Pulsar IO :: Debezium :: mongodb - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - provided - - - - ${project.groupId} - pulsar-io-debezium-core - ${project.version} - - - - io.debezium - debezium-connector-mongodb - ${debezium.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - + + 4.0.0 + + io.streamnative + pulsar-io-debezium + 3.1.0-SNAPSHOT + + pulsar-io-debezium-mongodb + Pulsar IO :: Debezium :: mongodb + + + ${project.groupId} + pulsar-io-core + ${project.version} + provided + + + ${project.groupId} + pulsar-io-debezium-core + ${project.version} + + + io.debezium + debezium-connector-mongodb + ${debezium.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/debezium/mssql/pom.xml b/pulsar-io/debezium/mssql/pom.xml index 4db8477f185bc..fe10f2b269e2c 100644 --- a/pulsar-io/debezium/mssql/pom.xml +++ b/pulsar-io/debezium/mssql/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.1.0-SNAPSHOT - pulsar-io-debezium-mssql Pulsar IO :: Debezium :: Microsoft SQL - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-sqlserver ${debezium.version} - - @@ -61,5 +54,4 @@ - diff --git a/pulsar-io/debezium/mysql/pom.xml b/pulsar-io/debezium/mysql/pom.xml index 8c443f2f6b6df..691b2cc537e50 100644 --- a/pulsar-io/debezium/mysql/pom.xml +++ b/pulsar-io/debezium/mysql/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.1.0-SNAPSHOT - pulsar-io-debezium-mysql Pulsar IO :: Debezium :: mysql - ${project.groupId} @@ -37,21 +35,17 @@ ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-mysql ${debezium.version} - - @@ -61,8 +55,6 @@ - - @@ -71,5 +63,4 @@ - diff --git 
a/pulsar-io/debezium/oracle/pom.xml b/pulsar-io/debezium/oracle/pom.xml index 9d6bffd38f2d2..ebd8ff0585559 100644 --- a/pulsar-io/debezium/oracle/pom.xml +++ b/pulsar-io/debezium/oracle/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.1.0-SNAPSHOT - pulsar-io-debezium-oracle Pulsar IO :: Debezium :: oracle - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-oracle ${debezium.version} - - @@ -61,5 +54,4 @@ - diff --git a/pulsar-io/debezium/pom.xml b/pulsar-io/debezium/pom.xml index ba64915466710..e64835abcd848 100644 --- a/pulsar-io/debezium/pom.xml +++ b/pulsar-io/debezium/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-debezium Pulsar IO :: Debezium - @@ -70,7 +68,6 @@ - core mysql @@ -79,5 +76,4 @@ oracle mssql - diff --git a/pulsar-io/debezium/postgres/pom.xml b/pulsar-io/debezium/postgres/pom.xml index 86fba2d3d1f34..a2e271e8ba1a3 100644 --- a/pulsar-io/debezium/postgres/pom.xml +++ b/pulsar-io/debezium/postgres/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io-debezium 3.1.0-SNAPSHOT - pulsar-io-debezium-postgres Pulsar IO :: Debezium :: postgres - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-io-debezium-core ${project.version} - io.debezium debezium-connector-postgres ${debezium.version} - org.postgresql postgresql ${debezium.postgresql.version} runtime - - @@ -68,5 +60,4 @@ - diff --git a/pulsar-io/docs/pom.xml b/pulsar-io/docs/pom.xml index 0c772104dbbfc..e200ba1f116b4 100644 --- a/pulsar-io/docs/pom.xml +++ b/pulsar-io/docs/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-docs Pulsar IO :: Docs - - ${project.groupId} pulsar-io-core @@ -45,7 +42,6 @@ com.beust jcommander - ${project.groupId} @@ 
-218,7 +214,6 @@ ${project.version} - @@ -267,5 +262,4 @@ - diff --git a/pulsar-io/docs/src/main/resources/findbugsExclude.xml b/pulsar-io/docs/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-io/docs/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/docs/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-dynamodb Pulsar IO :: DynamoDB - - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-functions-instance ${project.version} provided - ${project.groupId} pulsar-io-aws ${project.version} - org.apache.commons commons-lang3 - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.dataformat jackson-dataformat-cbor - com.google.code.gson gson - com.amazonaws aws-java-sdk-core - com.amazonaws @@ -90,9 +83,7 @@ 1.5.1 - - @@ -101,5 +92,4 @@ - diff --git a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java index d67c4e21154ee..2193cf39c17a5 100644 --- a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java +++ b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSource.java @@ -65,7 +65,7 @@ public void close() throws Exception { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - this.dynamodbSourceConfig = DynamoDBSourceConfig.load(config); + this.dynamodbSourceConfig = DynamoDBSourceConfig.load(config, sourceContext); checkArgument(isNotBlank(dynamodbSourceConfig.getAwsDynamodbStreamArn()), "empty dynamo-stream arn"); // Even if the endpoint is set, it seems to require a region to go with it diff --git 
a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java index b734dd5741155..0547ff8f863e0 100644 --- a/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java +++ b/pulsar-io/dynamodb/src/main/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfig.java @@ -35,6 +35,8 @@ import java.util.Map; import lombok.Data; import org.apache.pulsar.io.aws.AwsCredentialProviderPlugin; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; import software.amazon.awssdk.regions.Region; @@ -77,6 +79,7 @@ public class DynamoDBSourceConfig implements Serializable { @FieldDoc( required = false, defaultValue = "", + sensitive = true, help = "json-parameters to initialize `AwsCredentialsProviderPlugin`") private String awsCredentialPluginParam = ""; @@ -170,9 +173,8 @@ public static DynamoDBSourceConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), DynamoDBSourceConfig.class); } - public static DynamoDBSourceConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), DynamoDBSourceConfig.class); + public static DynamoDBSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, DynamoDBSourceConfig.class, sourceContext); } protected Region regionAsV2Region() { diff --git a/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java b/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java index f84cb785896e6..bdccaa2e5846e 100644 --- a/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java +++ 
b/pulsar-io/dynamodb/src/test/java/org/apache/pulsar/io/dynamodb/DynamoDBSourceConfigTests.java @@ -31,6 +31,8 @@ import java.util.Map; import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream; +import org.apache.pulsar.io.core.SourceContext; +import org.mockito.Mockito; import org.testng.annotations.Test; @@ -90,7 +92,8 @@ public final void loadFromMapTest() throws IOException { map.put("initialPositionInStream", InitialPositionInStream.TRIM_HORIZON); map.put("startAtTime", DAY); - DynamoDBSourceConfig config = DynamoDBSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + DynamoDBSourceConfig config = DynamoDBSourceConfig.load(map, sourceContext); assertNotNull(config); assertEquals(config.getAwsEndpoint(), "https://some.endpoint.aws"); @@ -111,7 +114,46 @@ public final void loadFromMapTest() throws IOException { ZonedDateTime expected = ZonedDateTime.ofInstant(DAY.toInstant(), ZoneOffset.UTC); assertEquals(actual, expected); } - + + @Test + public final void loadFromMapCredentialFromSecretTest() throws IOException { + Map map = new HashMap (); + map.put("awsEndpoint", "https://some.endpoint.aws"); + map.put("awsRegion", "us-east-1"); + map.put("awsDynamodbStreamArn", "arn:aws:dynamodb:us-west-2:111122223333:table/TestTable/stream/2015-05-11T21:21:33.291"); + map.put("checkpointInterval", "30000"); + map.put("backoffTime", "4000"); + map.put("numRetries", "3"); + map.put("receiveQueueSize", 2000); + map.put("applicationName", "My test application"); + map.put("initialPositionInStream", InitialPositionInStream.TRIM_HORIZON); + map.put("startAtTime", DAY); + + SourceContext sourceContext = Mockito.mock(SourceContext.class); + Mockito.when(sourceContext.getSecret("awsCredentialPluginParam")) + .thenReturn("{\"accessKey\":\"myKey\",\"secretKey\":\"my-Secret\"}"); + DynamoDBSourceConfig config = DynamoDBSourceConfig.load(map, sourceContext); + + assertNotNull(config); + 
assertEquals(config.getAwsEndpoint(), "https://some.endpoint.aws"); + assertEquals(config.getAwsRegion(), "us-east-1"); + assertEquals(config.getAwsDynamodbStreamArn(), "arn:aws:dynamodb:us-west-2:111122223333:table/TestTable/stream/2015-05-11T21:21:33.291"); + assertEquals(config.getAwsCredentialPluginParam(), + "{\"accessKey\":\"myKey\",\"secretKey\":\"my-Secret\"}"); + assertEquals(config.getApplicationName(), "My test application"); + assertEquals(config.getCheckpointInterval(), 30000); + assertEquals(config.getBackoffTime(), 4000); + assertEquals(config.getNumRetries(), 3); + assertEquals(config.getReceiveQueueSize(), 2000); + assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); + + Calendar cal = Calendar.getInstance(); + cal.setTime(config.getStartAtTime()); + ZonedDateTime actual = ZonedDateTime.ofInstant(cal.toInstant(), ZoneOffset.UTC); + ZonedDateTime expected = ZonedDateTime.ofInstant(DAY.toInstant(), ZoneOffset.UTC); + assertEquals(actual, expected); + } + @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "empty aws-credential param") public final void missingCredentialsTest() throws Exception { @@ -121,7 +163,8 @@ public final void missingCredentialsTest() throws Exception { map.put("awsDynamodbStreamArn", "arn:aws:dynamodb:us-west-2:111122223333:table/TestTable/stream/2015-05-11T21:21:33.291"); DynamoDBSource source = new DynamoDBSource(); - source.open(map, null); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + source.open(map, sourceContext); } @Test(expectedExceptions = IllegalArgumentException.class, @@ -136,7 +179,8 @@ public final void missingStartTimeTest() throws Exception { map.put("initialPositionInStream", InitialPositionInStream.AT_TIMESTAMP); DynamoDBSource source = new DynamoDBSource(); - source.open(map, null); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + source.open(map, sourceContext); } private File 
getFile(String name) { diff --git a/pulsar-io/elastic-search/pom.xml b/pulsar-io/elastic-search/pom.xml index 24c0bdc052841..0a757a8ab018e 100644 --- a/pulsar-io/elastic-search/pom.xml +++ b/pulsar-io/elastic-search/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT - - - pulsar-io-flume - Pulsar IO :: Flume - - - 1.8.2 - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - org.apache.flume - flume-ng-node - 1.9.0 - pom - - - org.apache.avro - avro-ipc - - - avro - org.apache.avro - - - - - org.apache.avro - avro - ${avro.version} - - - commons-lang - commons-lang - 2.6 - - - org.apache.avro - avro-ipc - ${avro.version} - - - org.mortbay.jetty - servlet-api - - - io.netty - netty - - - - - org.apache.curator - curator-framework - ${curator.version} - - - org.apache.curator - curator-test - ${curator.version} - test - - - - io.dropwizard.metrics - metrics-core - test - - - - org.xerial.snappy - snappy-java - test - - - - com.google.guava - guava - ${guava.version} - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - + + io.dropwizard.metrics + metrics-core + test + + + + org.xerial.snappy + snappy-java + test + + + com.google.guava + guava + ${guava.version} + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + + + - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - - + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + diff --git a/pulsar-io/hbase/pom.xml b/pulsar-io/hbase/pom.xml index ae2845a791dcd..22e6fec3cb115 100644 --- a/pulsar-io/hbase/pom.xml +++ b/pulsar-io/hbase/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.1.0-SNAPSHOT - - pulsar-io-hbase - Pulsar IO 
:: Hbase - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - com.google.guava - guava - - - - org.apache.hbase - hbase-client - ${hbase.version} - - - log4j - log4j - - - org.slf4j - slf4j-log4j12 - - - - - - org.apache.hbase - hbase-common - ${hbase.version} - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - - + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + diff --git a/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml b/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml index c2b520a765643..460cbd497c15f 100644 --- a/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml +++ b/pulsar-io/hbase/src/test/resources/hbase/hbase-site.xml @@ -1,3 +1,4 @@ + - - hbase.cluster.distributed - true - - - hbase.rootdir - hdfs://localhost:8020/hbase - - - hbase.zookeeper.quorum - localhost - - - hbase.zookeeper.property.clientPort - 2181 - - - zookeeper.znode.parent - /hbase - + + hbase.cluster.distributed + true + + + hbase.rootdir + hdfs://localhost:8020/hbase + + + hbase.zookeeper.quorum + localhost + + + hbase.zookeeper.property.clientPort + 2181 + + + zookeeper.znode.parent + /hbase + diff --git a/pulsar-io/hdfs2/pom.xml b/pulsar-io/hdfs2/pom.xml index 7b2f11c7b7c21..0735338057c25 100644 --- a/pulsar-io/hdfs2/pom.xml +++ b/pulsar-io/hdfs2/pom.xml @@ -1,3 +1,4 @@ + - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - - - \ No 
newline at end of file + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + + diff --git a/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml b/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml index 980349c4d8d5e..93756f7f28461 100644 --- a/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/hdfs2/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - fs.defaultFS - hdfs://0.0.0.0:8020 - - - io.compression.codecs - org.apache.hadoop.io.compress.GzipCodec, + + fs.defaultFS + hdfs://0.0.0.0:8020 + + + io.compression.codecs + org.apache.hadoop.io.compress.GzipCodec, org.apache.hadoop.io.compress.DefaultCodec, org.apache.hadoop.io.compress.SnappyCodec - + diff --git a/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml b/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml index bb722f1f63470..1b35ebd585012 100644 --- a/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml +++ b/pulsar-io/hdfs2/src/test/resources/hadoop/hdfs-site.xml @@ -1,3 +1,4 @@ + - - dfs.replication - 1 - - - dfs.client.use.datanode.hostname - true - - - dfs.support.append - true - + + dfs.replication + 1 + + + dfs.client.use.datanode.hostname + true + + + dfs.support.append + true + diff --git a/pulsar-io/hdfs3/pom.xml b/pulsar-io/hdfs3/pom.xml index 30f7368102df4..a0aeddad6e85e 100644 --- a/pulsar-io/hdfs3/pom.xml +++ b/pulsar-io/hdfs3/pom.xml @@ -1,3 +1,4 @@ + - - fs.defaultFS - hdfs://0.0.0.0:8020 - - - io.compression.codecs - org.apache.hadoop.io.compress.GzipCodec, + + fs.defaultFS + hdfs://0.0.0.0:8020 + + + io.compression.codecs + org.apache.hadoop.io.compress.GzipCodec, org.apache.hadoop.io.compress.DefaultCodec, org.apache.hadoop.io.compress.SnappyCodec - + diff --git a/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml b/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml index bb722f1f63470..1b35ebd585012 100644 --- 
a/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml +++ b/pulsar-io/hdfs3/src/test/resources/hadoop/hdfs-site.xml @@ -1,3 +1,4 @@ + - - dfs.replication - 1 - - - dfs.client.use.datanode.hostname - true - - - dfs.support.append - true - + + dfs.replication + 1 + + + dfs.client.use.datanode.hostname + true + + + dfs.support.append + true + diff --git a/pulsar-io/http/pom.xml b/pulsar-io/http/pom.xml index 47e48648e570e..6090fdd5dd623 100644 --- a/pulsar-io/http/pom.xml +++ b/pulsar-io/http/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT - - - pulsar-io-http - Pulsar IO :: HTTP - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - - org.apache.avro - avro - ${avro.version} - - - - org.apache.pulsar - pulsar-client-original - ${project.version} - test - - - - com.github.tomakehurst - wiremock-jre8 - ${wiremock.version} - test - - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.1.0-SNAPSHOT + + pulsar-io-http + Pulsar IO :: HTTP + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + org.apache.avro + avro + ${avro.version} + + + io.streamnative + pulsar-client-original + ${project.version} + test + + + com.github.tomakehurst + wiremock-jre8 + ${wiremock.version} + test + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + diff --git a/pulsar-io/influxdb/pom.xml b/pulsar-io/influxdb/pom.xml index cad7956ccfcce..1fc124dca3c77 100644 --- a/pulsar-io/influxdb/pom.xml +++ b/pulsar-io/influxdb/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.1.0-SNAPSHOT - 
- - pulsar-io-influxdb - Pulsar IO :: InfluxDB - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - - com.influxdb - influxdb-client-java - 4.0.0 - - - - org.influxdb - influxdb-java - 2.22 - - - com.squareup.okhttp3 - * - - - - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-collections4 - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - - \ No newline at end of file + + 4.0.0 + + pulsar-io + io.streamnative + 3.1.0-SNAPSHOT + + pulsar-io-influxdb + Pulsar IO :: InfluxDB + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-functions-instance + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + + + com.influxdb + influxdb-client-java + 4.0.0 + + + org.influxdb + influxdb-java + 2.22 + + + com.squareup.okhttp3 + * + + + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-collections4 + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + + diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java index 5b51461fc7b8e..0d431f84c52f2 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/InfluxDBGenericRecordSink.java @@ -46,12 +46,12 @@ public class InfluxDBGenericRecordSink implements Sink { @Override public void open(Map map, SinkContext sinkContext) throws Exception { try { - val configV2 = InfluxDBSinkConfig.load(map); + val configV2 = InfluxDBSinkConfig.load(map, sinkContext); configV2.validate(); sink = new InfluxDBSink(); } catch 
(Exception e) { try { - val configV1 = org.apache.pulsar.io.influxdb.v1.InfluxDBSinkConfig.load(map); + val configV1 = org.apache.pulsar.io.influxdb.v1.InfluxDBSinkConfig.load(map, sinkContext); configV1.validate(); sink = new org.apache.pulsar.io.influxdb.v1.InfluxDBGenericRecordSink(); } catch (Exception e1) { diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java index 06856bad80edc..217c5304b24f7 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBAbstractSink.java @@ -43,7 +43,7 @@ public abstract class InfluxDBAbstractSink extends BatchSink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config); + InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config, sinkContext); influxDBSinkConfig.validate(); super.init(influxDBSinkConfig.getBatchTimeMs(), influxDBSinkConfig.getBatchSize()); diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java index 9b7d8e1ce905d..4ae2cf1e4a3a1 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfig.java @@ -27,6 +27,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -94,7 +96,7 @@ public class InfluxDBSinkConfig implements Serializable { @FieldDoc( required = false, - defaultValue = 
"1000L", + defaultValue = "1000", help = "The InfluxDB operation time in milliseconds") private long batchTimeMs = 1000L; @@ -110,14 +112,11 @@ public static InfluxDBSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), InfluxDBSinkConfig.class); } - public static InfluxDBSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), InfluxDBSinkConfig.class); + public static InfluxDBSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, InfluxDBSinkConfig.class, sinkContext); } public void validate() { - Preconditions.checkNotNull(influxdbUrl, "influxdbUrl property not set."); - Preconditions.checkNotNull(database, "database property not set."); Preconditions.checkArgument(batchSize > 0, "batchSize must be a positive integer."); Preconditions.checkArgument(batchTimeMs > 0, "batchTimeMs must be a positive long."); } diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java index 08f1ab2339992..0aa43570596af 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSink.java @@ -49,7 +49,7 @@ public class InfluxDBSink extends BatchSink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config); + InfluxDBSinkConfig influxDBSinkConfig = InfluxDBSinkConfig.load(config, sinkContext); influxDBSinkConfig.validate(); super.init(influxDBSinkConfig.getBatchTimeMs(), influxDBSinkConfig.getBatchSize()); diff --git a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java 
b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java index 899b00c002155..ea87ee66b90a3 100644 --- a/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java +++ b/pulsar-io/influxdb/src/main/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfig.java @@ -27,6 +27,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -87,7 +89,7 @@ public class InfluxDBSinkConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "1000L", + defaultValue = "1000", help = "The InfluxDB operation time in milliseconds") private long batchTimeMs = 1000; @@ -103,17 +105,11 @@ public static InfluxDBSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), InfluxDBSinkConfig.class); } - public static InfluxDBSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), InfluxDBSinkConfig.class); + public static InfluxDBSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, InfluxDBSinkConfig.class, sinkContext); } public void validate() { - Preconditions.checkNotNull(influxdbUrl, "influxdbUrl property not set."); - Preconditions.checkNotNull(token, "token property not set."); - Preconditions.checkNotNull(organization, "organization property not set."); - Preconditions.checkNotNull(bucket, "bucket property not set."); - Preconditions.checkArgument(batchSize > 0, "batchSize must be a positive integer."); Preconditions.checkArgument(batchTimeMs > 0, "batchTimeMs must be a positive long."); } diff --git a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java 
b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java index 4493dcfb24854..10b1bfb624f49 100644 --- a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java +++ b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v1/InfluxDBSinkConfigTest.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.influxdb.v1; +import org.apache.pulsar.io.core.SinkContext; import org.influxdb.InfluxDB; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -60,8 +62,11 @@ public final void loadFromMapTest() throws IOException { map.put("gzipEnable", "false"); map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); + map.put("username", "admin"); + map.put("password", "admin"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals("http://localhost:8086", config.getInfluxdbUrl()); assertEquals("test_db", config.getDatabase()); @@ -71,6 +76,39 @@ public final void loadFromMapTest() throws IOException { assertEquals(Boolean.parseBoolean("false"), config.isGzipEnable()); assertEquals(Long.parseLong("1000"), config.getBatchTimeMs()); assertEquals(Integer.parseInt("100"), config.getBatchSize()); + assertEquals("admin", config.getUsername()); + assertEquals("admin", config.getPassword()); + } + + @Test + public final void loadFromMapCredentialFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("influxdbUrl", "http://localhost:8086"); + map.put("database", "test_db"); + map.put("consistencyLevel", "ONE"); + map.put("logLevel", "NONE"); + map.put("retentionPolicy", "autogen"); + map.put("gzipEnable", "false"); + map.put("batchTimeMs", "1000"); + map.put("batchSize", "100"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + 
Mockito.when(sinkContext.getSecret("username")) + .thenReturn("admin"); + Mockito.when(sinkContext.getSecret("password")) + .thenReturn("admin"); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals("http://localhost:8086", config.getInfluxdbUrl()); + assertEquals("test_db", config.getDatabase()); + assertEquals("ONE", config.getConsistencyLevel()); + assertEquals("NONE", config.getLogLevel()); + assertEquals("autogen", config.getRetentionPolicy()); + assertEquals(Boolean.parseBoolean("false"), config.isGzipEnable()); + assertEquals(Long.parseLong("1000"), config.getBatchTimeMs()); + assertEquals(Integer.parseInt("100"), config.getBatchSize()); + assertEquals("admin", config.getUsername()); + assertEquals("admin", config.getPassword()); } @Test @@ -85,12 +123,13 @@ public final void validValidateTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "influxdbUrl property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "influxdbUrl cannot be null") public final void missingInfluxdbUrlValidateTest() throws IOException { Map map = new HashMap<>(); map.put("database", "test_db"); @@ -101,7 +140,8 @@ public final void missingInfluxdbUrlValidateTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } @@ -118,7 +158,8 @@ public final void invalidBatchSizeTest() throws 
IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "-100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } @@ -135,7 +176,8 @@ public final void invalidConsistencyLevelTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); InfluxDB.ConsistencyLevel.valueOf(config.getConsistencyLevel().toUpperCase()); diff --git a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java index df1f7fd29a637..d6cee1e308d2b 100644 --- a/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java +++ b/pulsar-io/influxdb/src/test/java/org/apache/pulsar/io/influxdb/v2/InfluxDBSinkConfigTest.java @@ -24,6 +24,8 @@ import java.io.File; import java.util.HashMap; import java.util.Map; +import org.apache.pulsar.io.core.SinkContext; +import org.mockito.Mockito; import org.testng.annotations.Test; public class InfluxDBSinkConfigTest { @@ -58,18 +60,34 @@ private Map buildValidConfigMap() { public final void testLoadFromMap() throws Exception { Map map = buildValidConfigMap(); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); assertNotNull(config); config.validate(); verifyValues(config); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "influxdbUrl property not set.") + @Test + public final void 
testLoadFromMapCredentialFromSecret() throws Exception { + Map map = buildValidConfigMap(); + map.remove("token"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("token")) + .thenReturn("xxxx"); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); + assertNotNull(config); + config.validate(); + verifyValues(config); + } + + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "influxdbUrl cannot be null") public void testRequiredConfigMissing() throws Exception { Map map = buildValidConfigMap(); map.remove("influxdbUrl"); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } @@ -78,7 +96,8 @@ public void testRequiredConfigMissing() throws Exception { public void testBatchConfig() throws Exception { Map map = buildValidConfigMap(); map.put("batchSize", -1); - InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); config.validate(); } diff --git a/pulsar-io/jdbc/clickhouse/pom.xml b/pulsar-io/jdbc/clickhouse/pom.xml index 14dc030a753e4..065fe08835dc8 100644 --- a/pulsar-io/jdbc/clickhouse/pom.xml +++ b/pulsar-io/jdbc/clickhouse/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.1.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-clickhouse Pulsar IO :: Jdbc :: ClickHouse - ${project.groupId} @@ -45,13 +42,12 @@ all - * - * + * + * - diff --git a/pulsar-io/jdbc/core/pom.xml b/pulsar-io/jdbc/core/pom.xml index 5c2e97c7fdc29..cd90f322e3c6d 100644 --- a/pulsar-io/jdbc/core/pom.xml +++ b/pulsar-io/jdbc/core/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.1.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-core Pulsar IO :: Jdbc 
:: Core - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core ${project.version} - org.apache.avro avro ${avro.version} - com.fasterxml.jackson.core jackson-databind - com.google.guava guava - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.google.protobuf protobuf-java provided - - - \ No newline at end of file + diff --git a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java index 4586fcebcf167..ca33b3cfdaba9 100644 --- a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java +++ b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcAbstractSink.java @@ -76,7 +76,7 @@ public abstract class JdbcAbstractSink implements Sink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - jdbcSinkConfig = JdbcSinkConfig.load(config); + jdbcSinkConfig = JdbcSinkConfig.load(config, sinkContext); jdbcSinkConfig.validate(); jdbcUrl = jdbcSinkConfig.getJdbcUrl(); diff --git a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java index f798d94f7c35e..854d68381312c 100644 --- a/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java +++ b/pulsar-io/jdbc/core/src/main/java/org/apache/pulsar/io/jdbc/JdbcSinkConfig.java @@ -26,6 +26,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -145,9 +147,8 @@ public static JdbcSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), JdbcSinkConfig.class); } - public static JdbcSinkConfig load(Map map) throws IOException { - ObjectMapper 
mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), JdbcSinkConfig.class); + public static JdbcSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, JdbcSinkConfig.class, sinkContext); } public void validate() { diff --git a/pulsar-io/jdbc/mariadb/pom.xml b/pulsar-io/jdbc/mariadb/pom.xml index 943406a8b900f..b54b3a25d8a66 100644 --- a/pulsar-io/jdbc/mariadb/pom.xml +++ b/pulsar-io/jdbc/mariadb/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.1.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-mariadb Pulsar IO :: Jdbc :: Mariadb - ${project.groupId} @@ -52,4 +49,4 @@ - \ No newline at end of file + diff --git a/pulsar-io/jdbc/openmldb/pom.xml b/pulsar-io/jdbc/openmldb/pom.xml index 92376583d25ba..4623729710323 100644 --- a/pulsar-io/jdbc/openmldb/pom.xml +++ b/pulsar-io/jdbc/openmldb/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.1.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-openmldb Pulsar IO :: Jdbc :: OpenMLDB - ${project.groupId} @@ -60,7 +57,6 @@ runtime - diff --git a/pulsar-io/jdbc/pom.xml b/pulsar-io/jdbc/pom.xml index 5a82e163c554b..88e640621ea85 100644 --- a/pulsar-io/jdbc/pom.xml +++ b/pulsar-io/jdbc/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom @@ -31,12 +31,10 @@ openmldb - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-jdbc Pulsar IO :: Jdbc - diff --git a/pulsar-io/jdbc/postgres/pom.xml b/pulsar-io/jdbc/postgres/pom.xml index e7177cb2e19aa..648b7f2df008b 100644 --- a/pulsar-io/jdbc/postgres/pom.xml +++ b/pulsar-io/jdbc/postgres/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.1.0-SNAPSHOT 4.0.0 - pulsar-io-jdbc-postgres Pulsar IO :: Jdbc :: Postgres - ${project.groupId} @@ -44,7 +41,6 @@ runtime - diff --git a/pulsar-io/jdbc/sqlite/pom.xml b/pulsar-io/jdbc/sqlite/pom.xml index 5cf8833e6684a..7a8e932deb0da 100644 --- a/pulsar-io/jdbc/sqlite/pom.xml +++ 
b/pulsar-io/jdbc/sqlite/pom.xml @@ -1,3 +1,4 @@ + - + pulsar-io-jdbc - org.apache.pulsar + io.streamnative 3.1.0-SNAPSHOT 4.0.0 pulsar-io-jdbc-sqlite Pulsar IO :: Jdbc :: Sqlite - ${project.groupId} @@ -60,7 +58,6 @@ test - @@ -69,4 +66,4 @@ - \ No newline at end of file + diff --git a/pulsar-io/kafka-connect-adaptor-nar/pom.xml b/pulsar-io/kafka-connect-adaptor-nar/pom.xml index b3d547d333127..6255482116b8f 100644 --- a/pulsar-io/kafka-connect-adaptor-nar/pom.xml +++ b/pulsar-io/kafka-connect-adaptor-nar/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-kafka-connect-adaptor-nar Pulsar IO :: Kafka Connect Adaptor NAR - ${project.groupId} @@ -37,8 +35,6 @@ ${project.version} - - @@ -48,7 +44,30 @@ pulsar-io-kafka-connect-adaptor-${project.version} + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + - diff --git a/pulsar-io/kafka-connect-adaptor/pom.xml b/pulsar-io/kafka-connect-adaptor/pom.xml index 71a3cee11dcd0..7709d42645fe7 100644 --- a/pulsar-io/kafka-connect-adaptor/pom.xml +++ b/pulsar-io/kafka-connect-adaptor/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-kafka-connect-adaptor Pulsar IO :: Kafka Connect Adaptor - - ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-common ${project.version} compile - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - org.apache.kafka connect-runtime @@ -73,21 +66,34 @@ org.eclipse.jetty * + + jose4j + org.bitbucket.b_c + - org.apache.kafka connect-json ${kafka-client.version} + + + jose4j + org.bitbucket.b_c + + - org.apache.kafka connect-api ${kafka-client.version} + + + jose4j + org.bitbucket.b_c + + - ${project.groupId} @@ -100,45 +106,50 @@ - io.netty netty-buffer - org.apache.commons commons-lang3 - 
io.confluent kafka-connect-avro-converter ${confluent.version} + + + org.apache.avro + avro + + - ${project.groupId} pulsar-broker ${project.version} test - org.apache.kafka connect-file ${kafka-client.version} test + + + jose4j + org.bitbucket.b_c + + - ${project.groupId} testmocks ${project.version} test - ${project.groupId} pulsar-broker @@ -146,38 +157,37 @@ test test-jar - org.apache.avro avro ${avro.version} provided - com.google.protobuf protobuf-java provided - org.asynchttpclient async-http-client test - org.bouncycastle bc-fips ${bouncycastle.bc-fips.version} test - com.typesafe.netty netty-reactive-streams test + + com.google.protobuf + protobuf-java + 3.5.1 + - diff --git a/pulsar-io/kafka/pom.xml b/pulsar-io/kafka/pom.xml index 28971ac282af1..167b92c276a44 100644 --- a/pulsar-io/kafka/pom.xml +++ b/pulsar-io/kafka/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-kafka Pulsar IO :: Kafka - @@ -44,79 +42,75 @@ - - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core ${project.version} provided - ${project.groupId} pulsar-client-original ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.google.guava guava - org.apache.kafka kafka-clients ${kafka-client.version} + + + jose4j + org.bitbucket.b_c + + - io.confluent kafka-schema-registry-client ${confluent.version} - io.confluent kafka-avro-serializer ${confluent.version} - io.jsonwebtoken jjwt-impl - io.jsonwebtoken jjwt-jackson - org.hamcrest hamcrest test - org.awaitility awaitility test - - diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java index 5ceea4dec8dca..2bedba928b756 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java +++ 
b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSink.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Map; -import java.util.Objects; import java.util.Properties; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -79,10 +78,7 @@ protected Properties beforeCreateProducer(Properties props) { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - kafkaSinkConfig = KafkaSinkConfig.load(config); - Objects.requireNonNull(kafkaSinkConfig.getTopic(), "Kafka topic is not set"); - Objects.requireNonNull(kafkaSinkConfig.getBootstrapServers(), "Kafka bootstrapServers is not set"); - Objects.requireNonNull(kafkaSinkConfig.getAcks(), "Kafka acks mode is not set"); + kafkaSinkConfig = KafkaSinkConfig.load(config, sinkContext); if (kafkaSinkConfig.getBatchSize() <= 0) { throw new IllegalArgumentException("Invalid Kafka Producer batchSize : " + kafkaSinkConfig.getBatchSize()); diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java index f8539518851aa..782f9d5d57dbb 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaAbstractSource.java @@ -66,7 +66,7 @@ public abstract class KafkaAbstractSource extends PushSource { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - kafkaSourceConfig = KafkaSourceConfig.load(config); + kafkaSourceConfig = KafkaSourceConfig.load(config, sourceContext); Objects.requireNonNull(kafkaSourceConfig.getTopic(), "Kafka topic is not set"); Objects.requireNonNull(kafkaSourceConfig.getBootstrapServers(), "Kafka bootstrapServers is not set"); Objects.requireNonNull(kafkaSourceConfig.getGroupId(), "Kafka consumer group id is not set"); diff --git 
a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java index 755b2c89c8f20..8f772a57d0c4d 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSinkConfig.java @@ -26,6 +26,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -84,12 +86,12 @@ public class KafkaSinkConfig implements Serializable { + " before considering a request complete. This controls the durability of records that are sent.") private String acks; @FieldDoc( - defaultValue = "16384L", + defaultValue = "16384", help = "The batch size that Kafka producer will attempt to batch records together" + " before sending them to brokers.") private long batchSize = 16384L; @FieldDoc( - defaultValue = "1048576L", + defaultValue = "1048576", help = "The maximum size of a Kafka request in bytes.") private long maxRequestSize = 1048576L; @@ -122,8 +124,7 @@ public static KafkaSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), KafkaSinkConfig.class); } - public static KafkaSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), KafkaSinkConfig.class); + public static KafkaSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, KafkaSinkConfig.class, sinkContext); } } \ No newline at end of file diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java index 5de60d2a028c8..3edfa27190339 100644 --- 
a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java @@ -27,6 +27,7 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -151,8 +152,14 @@ public static KafkaSourceConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), KafkaSourceConfig.class); } - public static KafkaSourceConfig load(Map map) throws IOException { + public static KafkaSourceConfig load(Map map, SourceContext sourceContext) throws IOException { ObjectMapper mapper = new ObjectMapper(); + // since the KafkaSourceConfig requires the ACCEPT_EMPTY_STRING_AS_NULL_OBJECT feature + // We manually set the sensitive fields here instead of calling `IOConfigUtils.loadWithSecrets` + String sslTruststorePassword = sourceContext.getSecret("sslTruststorePassword"); + if (sslTruststorePassword != null) { + map.put("sslTruststorePassword", sslTruststorePassword); + } mapper.enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT); return mapper.readValue(mapper.writeValueAsString(map), KafkaSourceConfig.class); } diff --git a/pulsar-io/kafka/src/main/resources/findbugsExclude.xml b/pulsar-io/kafka/src/main/resources/findbugsExclude.xml index 3fc8f67c8640e..a48959506a69a 100644 --- a/pulsar-io/kafka/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/kafka/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-kinesis Pulsar IO :: Kinesis - 2.2.8 0.14.0 @@ -37,116 +35,96 @@ 1.9.0 2.3.0 - ${project.groupId} pulsar-io-common ${project.version} - ${project.groupId} pulsar-io-core ${project.version} - ${project.groupId} pulsar-io-aws ${project.version} - - org.apache.pulsar + io.streamnative pulsar-functions-instance 
${project.version} test - org.apache.commons commons-lang3 - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.dataformat jackson-dataformat-cbor - org.apache.avro avro ${avro.version} - com.github.wnameless.json json-flattener ${json-flattener.version} - com.google.code.gson gson - com.amazonaws aws-java-sdk-core - software.amazon.kinesis amazon-kinesis-client ${amazon-kinesis-client.version} - com.amazonaws amazon-kinesis-producer ${amazon-kinesis-producer.version} - - + com.google.flatbuffers flatbuffers-java ${flatbuffers-java.version} - javax.xml.bind jaxb-api ${jaxb-api.version} - org.testcontainers localstack test - org.awaitility awaitility test - - @@ -155,5 +133,4 @@ - diff --git a/pulsar-io/mongo/pom.xml b/pulsar-io/mongo/pom.xml index 214e76bad7927..875a2956b9381 100644 --- a/pulsar-io/mongo/pom.xml +++ b/pulsar-io/mongo/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT + io.streamnative + pulsar-io + 3.1.0-SNAPSHOT - pulsar-io-mongo Pulsar IO :: MongoDB - 4.1.2 - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.parent.groupId} pulsar-io-core @@ -64,7 +64,6 @@ guava - diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java index 35c327ed82b99..74f077da62036 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoAbstractConfig.java @@ -24,7 +24,6 @@ import java.io.Serializable; import lombok.Data; import lombok.experimental.Accessors; -import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -42,6 +41,7 @@ public abstract class MongoAbstractConfig implements Serializable { @FieldDoc( required = true, + sensitive = true, // it may 
contain password defaultValue = "", help = "The URI of MongoDB that the connector connects to " + "(see: https://docs.mongodb.com/manual/reference/connection-string/)" @@ -95,7 +95,6 @@ public MongoAbstractConfig( } public void validate() { - checkArgument(!StringUtils.isEmpty(getMongoUri()), "Required MongoDB URI is not set."); checkArgument(getBatchSize() > 0, "batchSize must be a positive integer."); checkArgument(getBatchTimeMs() > 0, "batchTimeMs must be a positive long."); } diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java index 2206d232eaf97..61d5aeb697e01 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSink.java @@ -86,7 +86,7 @@ public MongoSink(Supplier clientProvider) { public void open(Map config, SinkContext sinkContext) throws Exception { log.info("Open MongoDB Sink"); - mongoSinkConfig = MongoSinkConfig.load(config); + mongoSinkConfig = MongoSinkConfig.load(config, sinkContext); mongoSinkConfig.validate(); if (clientProvider != null) { diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java index 285f3c97bef1a..9431fe4910800 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSinkConfig.java @@ -30,6 +30,8 @@ import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; /** * Configuration class for the MongoDB Sink Connectors. 
@@ -59,11 +61,8 @@ public static MongoSinkConfig load(String yamlFile) throws IOException { return cfg; } - public static MongoSinkConfig load(Map map) throws IOException { - final ObjectMapper mapper = new ObjectMapper(); - final MongoSinkConfig cfg = mapper.readValue(mapper.writeValueAsString(map), MongoSinkConfig.class); - - return cfg; + public static MongoSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, MongoSinkConfig.class, sinkContext); } @Override diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java index 6ee95fc4cd4b5..68a31b461a51c 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSource.java @@ -79,7 +79,7 @@ public MongoSource(Supplier clientProvider) { public void open(Map config, SourceContext sourceContext) throws Exception { log.info("Open MongoDB Source"); - mongoSourceConfig = MongoSourceConfig.load(config); + mongoSourceConfig = MongoSourceConfig.load(config, sourceContext); mongoSourceConfig.validate(); if (clientProvider != null) { diff --git a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java index cf887a93bf3c3..1c0c7f4b3657a 100644 --- a/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java +++ b/pulsar-io/mongo/src/main/java/org/apache/pulsar/io/mongodb/MongoSourceConfig.java @@ -29,6 +29,8 @@ import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -75,12 +77,8 @@ public static MongoSourceConfig 
load(String yamlFile) throws IOException { return cfg; } - public static MongoSourceConfig load(Map map) throws IOException { - final ObjectMapper mapper = new ObjectMapper(); - final MongoSourceConfig cfg = - mapper.readValue(mapper.writeValueAsString(map), MongoSourceConfig.class); - - return cfg; + public static MongoSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, MongoSourceConfig.class, sourceContext); } /** diff --git a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java index b1166eac5722a..c86e45feb2348 100644 --- a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java +++ b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSinkConfigTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.io.mongodb; import java.util.Map; +import org.apache.pulsar.io.core.SinkContext; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -34,7 +36,27 @@ public void testLoadMapConfig() throws IOException { commonConfigMap.put("batchSize", TestHelper.BATCH_SIZE); commonConfigMap.put("batchTimeMs", TestHelper.BATCH_TIME); - final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap, sinkContext); + + assertEquals(cfg.getMongoUri(), TestHelper.URI); + assertEquals(cfg.getDatabase(), TestHelper.DB); + assertEquals(cfg.getCollection(), TestHelper.COLL); + assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE); + assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME); + } + + @Test + public void testLoadMapConfigUrlFromSecret() throws IOException { + final Map commonConfigMap = TestHelper.createCommonConfigMap(); + commonConfigMap.put("batchSize", TestHelper.BATCH_SIZE); + 
commonConfigMap.put("batchTimeMs", TestHelper.BATCH_TIME); + commonConfigMap.remove("mongoUri"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("mongoUri")) + .thenReturn(TestHelper.URI); + final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap, sinkContext); assertEquals(cfg.getMongoUri(), TestHelper.URI); assertEquals(cfg.getDatabase(), TestHelper.DB); @@ -44,12 +66,13 @@ public void testLoadMapConfig() throws IOException { } @Test(expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "Required MongoDB URI is not set.") + expectedExceptionsMessageRegExp = "mongoUri cannot be null") public void testBadMongoUri() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeMongoUri(configMap); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -60,7 +83,8 @@ public void testBadDatabase() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeDatabase(configMap); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -71,7 +95,8 @@ public void testBadCollection() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeCollection(configMap); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -82,7 +107,8 @@ public void testBadBatchSize() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchSize(configMap, 0); - final 
MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } @@ -93,7 +119,8 @@ public void testBadBatchTime() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchTime(configMap, 0L); - final MongoSinkConfig cfg = MongoSinkConfig.load(configMap); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + final MongoSinkConfig cfg = MongoSinkConfig.load(configMap, sinkContext); cfg.validate(); } diff --git a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java index e7fd01549b033..528cd0237ef16 100644 --- a/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java +++ b/pulsar-io/mongo/src/test/java/org/apache/pulsar/io/mongodb/MongoSourceConfigTest.java @@ -23,6 +23,8 @@ import java.io.File; import java.io.IOException; import java.util.Map; +import org.apache.pulsar.io.core.SourceContext; +import org.mockito.Mockito; import org.testng.annotations.Test; public class MongoSourceConfigTest { @@ -32,7 +34,27 @@ public void testLoadMapConfig() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putSyncType(configMap, TestHelper.SYNC_TYPE); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); + + assertEquals(cfg.getMongoUri(), TestHelper.URI); + assertEquals(cfg.getDatabase(), TestHelper.DB); + assertEquals(cfg.getCollection(), TestHelper.COLL); + assertEquals(cfg.getSyncType(), TestHelper.SYNC_TYPE); + assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE); + assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME); + } + + 
@Test + public void testLoadMapConfigUriFromSecret() throws IOException { + final Map configMap = TestHelper.createCommonConfigMap(); + TestHelper.putSyncType(configMap, TestHelper.SYNC_TYPE); + configMap.remove("mongoUri"); + + SourceContext sourceContext = Mockito.mock(SourceContext.class); + Mockito.when(sourceContext.getSecret("mongoUri")) + .thenReturn(TestHelper.URI); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); assertEquals(cfg.getMongoUri(), TestHelper.URI); assertEquals(cfg.getDatabase(), TestHelper.DB); @@ -43,12 +65,13 @@ public void testLoadMapConfig() throws IOException { } @Test(expectedExceptions = IllegalArgumentException.class, - expectedExceptionsMessageRegExp = "Required MongoDB URI is not set.") + expectedExceptionsMessageRegExp = "mongoUri cannot be null") public void testBadMongoUri() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.removeMongoUri(configMap); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); cfg.validate(); } @@ -61,7 +84,8 @@ public void testBadSyncType() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putSyncType(configMap, "wrong_sync_type_str"); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); cfg.validate(); } @@ -72,7 +96,8 @@ public void testBadBatchSize() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchSize(configMap, 0); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, 
sourceContext); cfg.validate(); } @@ -83,7 +108,8 @@ public void testBadBatchTime() throws IOException { final Map configMap = TestHelper.createCommonConfigMap(); TestHelper.putBatchTime(configMap, 0L); - final MongoSourceConfig cfg = MongoSourceConfig.load(configMap); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + final MongoSourceConfig cfg = MongoSourceConfig.load(configMap, sourceContext); cfg.validate(); } diff --git a/pulsar-io/netty/pom.xml b/pulsar-io/netty/pom.xml index dcbc025510b67..56834efb878a5 100644 --- a/pulsar-io/netty/pom.xml +++ b/pulsar-io/netty/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - org.apache.pulsar - pulsar-io - 3.1.0-SNAPSHOT - - - pulsar-io-netty - Pulsar IO :: Netty - - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - - com.fasterxml.jackson.core - jackson-databind - - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - - io.netty - netty-handler - - - - io.netty - netty-codec-http - - - - com.google.guava - guava - - - - org.apache.commons - commons-lang3 - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - - + + 4.0.0 + + io.streamnative + pulsar-io + 3.1.0-SNAPSHOT + + pulsar-io-netty + Pulsar IO :: Netty + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + io.netty + netty-handler + + + io.netty + netty-codec-http + + + com.google.guava + guava + + + org.apache.commons + commons-lang3 + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + diff --git 
a/pulsar-io/netty/src/main/resources/findbugsExclude.xml b/pulsar-io/netty/src/main/resources/findbugsExclude.xml index 3a52062fc9233..6b8906a775bd3 100644 --- a/pulsar-io/netty/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/netty/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - + + + + + + + + + + + + diff --git a/pulsar-io/nsq/pom.xml b/pulsar-io/nsq/pom.xml index f6bed0c7a74b4..a79d424ec4b62 100644 --- a/pulsar-io/nsq/pom.xml +++ b/pulsar-io/nsq/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-nsq Pulsar IO :: NSQ - ${project.groupId} pulsar-io-core ${project.version} - com.sproutsocial nsq-j ${nsq-client.version} - org.apache.commons commons-collections4 - org.apache.commons commons-lang3 - ${project.groupId} pulsar-io-common ${project.version} - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - - - - - org.apache.nifi - nifi-nar-maven-plugin - - + + + org.apache.nifi + nifi-nar-maven-plugin + + diff --git a/pulsar-io/pom.xml b/pulsar-io/pom.xml index c1a58d059cd48..8b005ef65df78 100644 --- a/pulsar-io/pom.xml +++ b/pulsar-io/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-io Pulsar IO :: Parent - main @@ -77,7 +75,6 @@ alluxio - pulsar-io-tests @@ -110,7 +107,6 @@ alluxio - pulsar-io-elastic-tests @@ -119,7 +115,6 @@ elastic-search - pulsar-io-kafka-connect-tests @@ -131,7 +126,6 @@ debezium - core-modules @@ -145,7 +139,6 @@ - @@ -163,5 +156,4 @@ - diff --git a/pulsar-io/rabbitmq/pom.xml b/pulsar-io/rabbitmq/pom.xml index 3cdcb681b3fa8..6685b297e5921 100644 --- a/pulsar-io/rabbitmq/pom.xml +++ b/pulsar-io/rabbitmq/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-rabbitmq Pulsar IO :: RabbitMQ - - + + ${project.groupId} + pulsar-io-common + ${project.version} + ${project.groupId} pulsar-io-core @@ -53,28 +55,23 @@ 
pulsar-client-original ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.google.guava guava - com.rabbitmq amqp-client ${rabbitmq-client.version} - org.apache.qpid qpid-broker @@ -86,9 +83,7 @@ awaitility test - - @@ -97,5 +92,4 @@ - diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java index f317a35734e69..89192c42346e8 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSink.java @@ -53,7 +53,7 @@ public class RabbitMQSink implements Sink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - rabbitMQSinkConfig = RabbitMQSinkConfig.load(config); + rabbitMQSinkConfig = RabbitMQSinkConfig.load(config, sinkContext); rabbitMQSinkConfig.validate(); ConnectionFactory connectionFactory = rabbitMQSinkConfig.createConnectionFactory(); diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java index c1f8d6b8ad3d3..39f97e5e460c8 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSinkConfig.java @@ -20,7 +20,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; -import com.google.common.base.Preconditions; import java.io.File; import java.io.IOException; import java.io.Serializable; @@ -28,6 +27,8 @@ import lombok.Data; import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import 
org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -60,14 +61,12 @@ public static RabbitMQSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), RabbitMQSinkConfig.class); } - public static RabbitMQSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), RabbitMQSinkConfig.class); + public static RabbitMQSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, RabbitMQSinkConfig.class, sinkContext); } @Override public void validate() { super.validate(); - Preconditions.checkNotNull(exchangeName, "exchangeName property not set."); } } diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java index d15108c4d8288..b0b7ef31b08de 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSource.java @@ -54,7 +54,7 @@ public class RabbitMQSource extends PushSource { @Override public void open(Map config, SourceContext sourceContext) throws Exception { - rabbitMQSourceConfig = RabbitMQSourceConfig.load(config); + rabbitMQSourceConfig = RabbitMQSourceConfig.load(config, sourceContext); rabbitMQSourceConfig.validate(); ConnectionFactory connectionFactory = rabbitMQSourceConfig.createConnectionFactory(); diff --git a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java index f24018e70da13..01e23a7146080 100644 --- a/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java +++ b/pulsar-io/rabbitmq/src/main/java/org/apache/pulsar/io/rabbitmq/RabbitMQSourceConfig.java @@ -28,6 +28,8 @@ import 
lombok.Data; import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.core.annotations.FieldDoc; @Data @@ -66,9 +68,8 @@ public static RabbitMQSourceConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), RabbitMQSourceConfig.class); } - public static RabbitMQSourceConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), RabbitMQSourceConfig.class); + public static RabbitMQSourceConfig load(Map map, SourceContext sourceContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, RabbitMQSourceConfig.class, sourceContext); } @Override diff --git a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java index 3d4fd6f46e16f..8706cb567524f 100644 --- a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java +++ b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/sink/RabbitMQSinkConfigTest.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.rabbitmq.sink; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.rabbitmq.RabbitMQSinkConfig; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -71,7 +73,45 @@ public final void loadFromMapTest() throws IOException { map.put("exchangeName", "test-exchange"); map.put("exchangeType", "test-exchange-type"); - RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals(config.getHost(), "localhost"); + assertEquals(config.getPort(), 
Integer.parseInt("5673")); + assertEquals(config.getVirtualHost(), "/"); + assertEquals(config.getUsername(), "guest"); + assertEquals(config.getPassword(), "guest"); + assertEquals(config.getConnectionName(), "test-connection"); + assertEquals(config.getRequestedChannelMax(), Integer.parseInt("0")); + assertEquals(config.getRequestedFrameMax(), Integer.parseInt("0")); + assertEquals(config.getConnectionTimeout(), Integer.parseInt("60000")); + assertEquals(config.getHandshakeTimeout(), Integer.parseInt("10000")); + assertEquals(config.getRequestedHeartbeat(), Integer.parseInt("60")); + assertEquals(config.getExchangeName(), "test-exchange"); + assertEquals(config.getExchangeType(), "test-exchange-type"); + } + + @Test + public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("host", "localhost"); + map.put("port", "5673"); + map.put("virtualHost", "/"); + map.put("connectionName", "test-connection"); + map.put("requestedChannelMax", "0"); + map.put("requestedFrameMax", "0"); + map.put("connectionTimeout", "60000"); + map.put("handshakeTimeout", "10000"); + map.put("requestedHeartbeat", "60"); + map.put("exchangeName", "test-exchange"); + map.put("exchangeType", "test-exchange-type"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("username")) + .thenReturn("guest"); + Mockito.when(sinkContext.getSecret("password")) + .thenReturn("guest"); + RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals(config.getHost(), "localhost"); assertEquals(config.getPort(), Integer.parseInt("5673")); @@ -105,12 +145,13 @@ public final void validValidateTest() throws IOException { map.put("exchangeName", "test-exchange"); map.put("exchangeType", "test-exchange-type"); - RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RabbitMQSinkConfig config = 
RabbitMQSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "exchangeName property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "exchangeName cannot be null") public final void missingExchangeValidateTest() throws IOException { Map map = new HashMap<>(); map.put("host", "localhost"); @@ -126,7 +167,8 @@ public final void missingExchangeValidateTest() throws IOException { map.put("requestedHeartbeat", "60"); map.put("exchangeType", "test-exchange-type"); - RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RabbitMQSinkConfig config = RabbitMQSinkConfig.load(map, sinkContext); config.validate(); } diff --git a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java index c33e0070c6fd0..43a90062fa453 100644 --- a/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java +++ b/pulsar-io/rabbitmq/src/test/java/org/apache/pulsar/io/rabbitmq/source/RabbitMQSourceConfigTest.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.rabbitmq.source; +import org.apache.pulsar.io.core.SourceContext; import org.apache.pulsar.io.rabbitmq.RabbitMQSourceConfig; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -76,7 +78,50 @@ public final void loadFromMapTest() throws IOException { map.put("prefetchGlobal", "false"); map.put("passive", "true"); - RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); + assertNotNull(config); + assertEquals("localhost", config.getHost()); + 
assertEquals(Integer.parseInt("5672"), config.getPort()); + assertEquals("/", config.getVirtualHost()); + assertEquals("guest", config.getUsername()); + assertEquals("guest", config.getPassword()); + assertEquals("test-queue", config.getQueueName()); + assertEquals("test-connection", config.getConnectionName()); + assertEquals(Integer.parseInt("0"), config.getRequestedChannelMax()); + assertEquals(Integer.parseInt("0"), config.getRequestedFrameMax()); + assertEquals(Integer.parseInt("60000"), config.getConnectionTimeout()); + assertEquals(Integer.parseInt("10000"), config.getHandshakeTimeout()); + assertEquals(Integer.parseInt("60"), config.getRequestedHeartbeat()); + assertEquals(Integer.parseInt("0"), config.getPrefetchCount()); + assertEquals(false, config.isPrefetchGlobal()); + assertEquals(false, config.isPrefetchGlobal()); + assertEquals(true, config.isPassive()); + } + + @Test + public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("host", "localhost"); + map.put("port", "5672"); + map.put("virtualHost", "/"); + map.put("queueName", "test-queue"); + map.put("connectionName", "test-connection"); + map.put("requestedChannelMax", "0"); + map.put("requestedFrameMax", "0"); + map.put("connectionTimeout", "60000"); + map.put("handshakeTimeout", "10000"); + map.put("requestedHeartbeat", "60"); + map.put("prefetchCount", "0"); + map.put("prefetchGlobal", "false"); + map.put("passive", "true"); + + SourceContext sourceContext = Mockito.mock(SourceContext.class); + Mockito.when(sourceContext.getSecret("username")) + .thenReturn("guest"); + Mockito.when(sourceContext.getSecret("password")) + .thenReturn("guest"); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); assertNotNull(config); assertEquals("localhost", config.getHost()); assertEquals(Integer.parseInt("5672"), config.getPort()); @@ -115,12 +160,13 @@ public final void validValidateTest() throws IOException { 
map.put("prefetchGlobal", "false"); map.put("passive", "false"); - RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "host property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "host cannot be null") public final void missingHostValidateTest() throws IOException { Map map = new HashMap<>(); map.put("port", "5672"); @@ -138,7 +184,8 @@ public final void missingHostValidateTest() throws IOException { map.put("prefetchGlobal", "false"); map.put("passive", "false"); - RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); config.validate(); } @@ -162,7 +209,8 @@ public final void invalidPrefetchCountTest() throws IOException { map.put("prefetchGlobal", "false"); map.put("passive", "false"); - RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map); + SourceContext sourceContext = Mockito.mock(SourceContext.class); + RabbitMQSourceConfig config = RabbitMQSourceConfig.load(map, sourceContext); config.validate(); } diff --git a/pulsar-io/redis/pom.xml b/pulsar-io/redis/pom.xml index 351ae205e667f..8f0e000129289 100644 --- a/pulsar-io/redis/pom.xml +++ b/pulsar-io/redis/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.1.0-SNAPSHOT - - - pulsar-io-redis - Pulsar IO :: Redis - - - - ${project.groupId} - pulsar-io-core - ${project.version} - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - io.lettuce - lettuce-core - 5.0.2.RELEASE - - - com.google.guava - guava - - - 
com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-collections4 - - - com.github.kstyrc - embedded-redis - 0.6 - test - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - - \ No newline at end of file + + 4.0.0 + + pulsar-io + io.streamnative + 3.1.0-SNAPSHOT + + pulsar-io-redis + Pulsar IO :: Redis + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.groupId} + pulsar-io-core + ${project.version} + + + ${project.groupId} + pulsar-functions-instance + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + + + io.lettuce + lettuce-core + 5.0.2.RELEASE + + + com.google.guava + guava + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-collections4 + + + com.github.kstyrc + embedded-redis + 0.6 + test + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + + diff --git a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java index 978e7de31a51c..89ec684dded72 100644 --- a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java +++ b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/RedisAbstractConfig.java @@ -88,13 +88,11 @@ public class RedisAbstractConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "10000L", + defaultValue = "10000", help = "The amount of time in milliseconds to wait before timing out when connecting") private long connectTimeout = 10000L; public void validate() { - Preconditions.checkNotNull(redisHosts, "redisHosts property not set."); - Preconditions.checkNotNull(redisDatabase, "redisDatabase property not set."); 
Preconditions.checkNotNull(clientMode, "clientMode property not set."); } @@ -105,7 +103,6 @@ public enum ClientMode { public List getHostAndPorts() { List hostAndPorts = Lists.newArrayList(); - Preconditions.checkNotNull(redisHosts, "redisHosts property not set."); String[] hosts = StringUtils.split(redisHosts, ","); for (String host : hosts) { HostAndPort hostAndPort = HostAndPort.fromString(host); diff --git a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java index bff0a5c2da592..ebd6e9dbab272 100644 --- a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java +++ b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSink.java @@ -68,7 +68,7 @@ public class RedisSink implements Sink { public void open(Map config, SinkContext sinkContext) throws Exception { log.info("Open Redis Sink"); - redisSinkConfig = RedisSinkConfig.load(config); + redisSinkConfig = RedisSinkConfig.load(config, sinkContext); redisSinkConfig.validate(); redisSession = RedisSession.create(redisSinkConfig); diff --git a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java index a9db66812a475..f7a70cb65a826 100644 --- a/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java +++ b/pulsar-io/redis/src/main/java/org/apache/pulsar/io/redis/sink/RedisSinkConfig.java @@ -28,6 +28,8 @@ import lombok.Data; import lombok.EqualsAndHashCode; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; import org.apache.pulsar.io.redis.RedisAbstractConfig; @@ -40,13 +42,13 @@ public class RedisSinkConfig extends RedisAbstractConfig implements Serializable @FieldDoc( required = false, - defaultValue 
= "10000L", + defaultValue = "10000", help = "The amount of time in milliseconds before an operation is marked as timed out") private long operationTimeout = 10000L; @FieldDoc( required = false, - defaultValue = "1000L", + defaultValue = "1000", help = "The Redis operation time in milliseconds") private long batchTimeMs = 1000L; @@ -62,9 +64,8 @@ public static RedisSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), RedisSinkConfig.class); } - public static RedisSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), RedisSinkConfig.class); + public static RedisSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, RedisSinkConfig.class, sinkContext); } @Override diff --git a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java index 1316d0994a1cd..39fc6e540c242 100644 --- a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java +++ b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkConfigTest.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.redis.sink; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.redis.RedisAbstractConfig; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -62,7 +64,34 @@ public final void loadFromMapTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals(config.getRedisHosts(), "localhost:6379"); + assertEquals(config.getRedisPassword(), "fake@123"); 
+ assertEquals(config.getRedisDatabase(), Integer.parseInt("1")); + assertEquals(config.getClientMode(), "Standalone"); + assertEquals(config.getOperationTimeout(), Long.parseLong("2000")); + assertEquals(config.getBatchSize(), Integer.parseInt("100")); + assertEquals(config.getBatchTimeMs(), Long.parseLong("1000")); + assertEquals(config.getConnectTimeout(), Long.parseLong("3000")); + } + + @Test + public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap(); + map.put("redisHosts", "localhost:6379"); + map.put("redisDatabase", "1"); + map.put("clientMode", "Standalone"); + map.put("operationTimeout", "2000"); + map.put("batchSize", "100"); + map.put("batchTimeMs", "1000"); + map.put("connectTimeout", "3000"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + Mockito.when(sinkContext.getSecret("redisPassword")) + .thenReturn("fake@123"); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals(config.getRedisHosts(), "localhost:6379"); assertEquals(config.getRedisPassword(), "fake@123"); @@ -86,12 +115,13 @@ public final void validValidateTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "redisHosts property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "redisHosts cannot be null") public final void missingValidValidateTableNameTest() throws IOException { Map map = new HashMap(); map.put("redisPassword", "fake@123"); @@ -102,7 +132,8 @@ public final void missingValidValidateTableNameTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", 
"3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); } @@ -119,7 +150,8 @@ public final void invalidBatchTimeMsTest() throws IOException { map.put("batchTimeMs", "-100"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); } @@ -136,7 +168,8 @@ public final void invalidClientModeTest() throws IOException { map.put("batchTimeMs", "1000"); map.put("connectTimeout", "3000"); - RedisSinkConfig config = RedisSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + RedisSinkConfig config = RedisSinkConfig.load(map, sinkContext); config.validate(); RedisAbstractConfig.ClientMode.valueOf(config.getClientMode().toUpperCase()); diff --git a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java index 214151345b42c..2b407fafa5e04 100644 --- a/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java +++ b/pulsar-io/redis/src/test/java/org/apache/pulsar/io/redis/sink/RedisSinkTest.java @@ -21,7 +21,9 @@ import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.instance.SinkRecord; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.redis.EmbeddedRedisUtils; +import org.mockito.Mockito; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -66,7 +68,8 @@ public void TestOpenAndWriteSink() throws Exception { Record record = build("fakeTopic", "fakeKey", "fakeValue"); // open should success - sink.open(configs, null); + SinkContext 
sinkContext = Mockito.mock(SinkContext.class); + sink.open(configs, sinkContext); // write should success. sink.write(record); diff --git a/pulsar-io/solr/pom.xml b/pulsar-io/solr/pom.xml index 741c6d135c509..bf1039dd6d58e 100644 --- a/pulsar-io/solr/pom.xml +++ b/pulsar-io/solr/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - pulsar-io - org.apache.pulsar - 3.1.0-SNAPSHOT - - - - 8.11.1 - - - pulsar-io-solr - Pulsar IO :: Solr - - - - ${project.parent.groupId} - pulsar-io-core - ${project.parent.version} - - - ${project.groupId} - pulsar-functions-instance - ${project.version} - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - org.apache.solr - solr-solrj - ${solr.version} - - - org.apache.solr - solr-core - ${solr.version} - test - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-collections4 - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - - - \ No newline at end of file + + 4.0.0 + + pulsar-io + io.streamnative + 3.1.0-SNAPSHOT + + + 8.11.3 + + pulsar-io-solr + Pulsar IO :: Solr + + + ${project.groupId} + pulsar-io-common + ${project.version} + + + ${project.parent.groupId} + pulsar-io-core + ${project.parent.version} + + + ${project.groupId} + pulsar-functions-instance + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + + + org.apache.solr + solr-solrj + ${solr.version} + + + org.apache.solr + solr-core + ${solr.version} + + + jose4j + org.bitbucket.b_c + + + test + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-collections4 + + + + + + org.apache.nifi + nifi-nar-maven-plugin + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + spotbugs + verify + + check + + + + + + + diff --git 
a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java index de9cdb4a9d82a..202c782c14c49 100644 --- a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java +++ b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrAbstractSink.java @@ -48,7 +48,7 @@ public abstract class SolrAbstractSink implements Sink { @Override public void open(Map config, SinkContext sinkContext) throws Exception { - solrSinkConfig = SolrSinkConfig.load(config); + solrSinkConfig = SolrSinkConfig.load(config, sinkContext); solrSinkConfig.validate(); enableBasicAuth = !Strings.isNullOrEmpty(solrSinkConfig.getUsername()); diff --git a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java index 02733d230bdcb..daa93a366b110 100644 --- a/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java +++ b/pulsar-io/solr/src/main/java/org/apache/pulsar/io/solr/SolrSinkConfig.java @@ -27,6 +27,8 @@ import java.util.Map; import lombok.Data; import lombok.experimental.Accessors; +import org.apache.pulsar.io.common.IOConfigUtils; +import org.apache.pulsar.io.core.SinkContext; import org.apache.pulsar.io.core.annotations.FieldDoc; /** @@ -84,9 +86,8 @@ public static SolrSinkConfig load(String yamlFile) throws IOException { return mapper.readValue(new File(yamlFile), SolrSinkConfig.class); } - public static SolrSinkConfig load(Map map) throws IOException { - ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(mapper.writeValueAsString(map), SolrSinkConfig.class); + public static SolrSinkConfig load(Map map, SinkContext sinkContext) throws IOException { + return IOConfigUtils.loadWithSecrets(map, SolrSinkConfig.class, sinkContext); } public void validate() { diff --git a/pulsar-io/solr/src/main/resources/findbugsExclude.xml 
b/pulsar-io/solr/src/main/resources/findbugsExclude.xml index f49ff86c90caf..47c3d73af5f06 100644 --- a/pulsar-io/solr/src/main/resources/findbugsExclude.xml +++ b/pulsar-io/solr/src/main/resources/findbugsExclude.xml @@ -1,22 +1,23 @@ - - - \ No newline at end of file + + + + diff --git a/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java b/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java index 42d2121dbfcbd..2c2447a637d35 100644 --- a/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java +++ b/pulsar-io/solr/src/test/java/org/apache/pulsar/io/solr/SolrSinkConfigTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.io.solr; import com.google.common.collect.Lists; +import org.apache.pulsar.io.core.SinkContext; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.io.File; @@ -61,7 +63,31 @@ public final void loadFromMapTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); + assertNotNull(config); + assertEquals(config.getSolrUrl(), "localhost:2181,localhost:2182/chroot"); + assertEquals(config.getSolrMode(), "SolrCloud"); + assertEquals(config.getSolrCollection(), "techproducts"); + assertEquals(config.getSolrCommitWithinMs(), Integer.parseInt("100")); + assertEquals(config.getUsername(), "fakeuser"); + assertEquals(config.getPassword(), "fake@123"); + } + + @Test + public final void loadFromMapCredentialsFromSecretTest() throws IOException { + Map map = new HashMap<>(); + map.put("solrUrl", "localhost:2181,localhost:2182/chroot"); + map.put("solrMode", "SolrCloud"); + map.put("solrCollection", "techproducts"); + map.put("solrCommitWithinMs", "100"); + + SinkContext sinkContext = Mockito.mock(SinkContext.class); + 
Mockito.when(sinkContext.getSecret("username")) + .thenReturn("fakeuser"); + Mockito.when(sinkContext.getSecret("password")) + .thenReturn("fake@123"); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals(config.getSolrUrl(), "localhost:2181,localhost:2182/chroot"); assertEquals(config.getSolrMode(), "SolrCloud"); @@ -81,12 +107,13 @@ public final void validValidateTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); } - @Test(expectedExceptions = NullPointerException.class, - expectedExceptionsMessageRegExp = "solrUrl property not set.") + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "solrUrl cannot be null") public final void missingValidValidateSolrModeTest() throws IOException { Map map = new HashMap<>(); map.put("solrMode", "SolrCloud"); @@ -95,7 +122,8 @@ public final void missingValidValidateSolrModeTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); } @@ -110,7 +138,8 @@ public final void invalidBatchTimeMsTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); } @@ -125,7 +154,8 @@ public final void invalidClientModeTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + 
SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); SolrAbstractSink.SolrMode.valueOf(config.getSolrMode().toUpperCase()); @@ -141,7 +171,8 @@ public final void validZkChrootTest() throws IOException { map.put("username", "fakeuser"); map.put("password", "fake@123"); - SolrSinkConfig config = SolrSinkConfig.load(map); + SinkContext sinkContext = Mockito.mock(SinkContext.class); + SolrSinkConfig config = SolrSinkConfig.load(map, sinkContext); config.validate(); String url = config.getSolrUrl(); diff --git a/pulsar-io/solr/src/test/resources/solr.xml b/pulsar-io/solr/src/test/resources/solr.xml index 495a71f72f3f2..9ce4d37f4beef 100644 --- a/pulsar-io/solr/src/test/resources/solr.xml +++ b/pulsar-io/solr/src/test/resources/solr.xml @@ -1,3 +1,4 @@ + - - ${host:} - ${jetty.port:8983} - ${hostContext:solr} - ${genericCoreNodeNames:true} - ${zkClientTimeout:30000} - ${distribUpdateSoTimeout:600000} - ${distribUpdateConnTimeout:60000} - ${zkCredentialsProvider:org.apache.solr.common.cloud.DefaultZkCredentialsProvider} - ${zkACLProvider:org.apache.solr.common.cloud.DefaultZkACLProvider} - - - ${socketTimeout:600000} - ${connTimeout:60000} - - \ No newline at end of file + + ${host:} + ${jetty.port:8983} + ${hostContext:solr} + ${genericCoreNodeNames:true} + ${zkClientTimeout:30000} + ${distribUpdateSoTimeout:600000} + ${distribUpdateConnTimeout:60000} + ${zkCredentialsProvider:org.apache.solr.common.cloud.DefaultZkCredentialsProvider} + ${zkACLProvider:org.apache.solr.common.cloud.DefaultZkACLProvider} + + + ${socketTimeout:600000} + ${connTimeout:60000} + + diff --git a/pulsar-io/twitter/pom.xml b/pulsar-io/twitter/pom.xml index 03b9394c44109..41374d0fee261 100644 --- a/pulsar-io/twitter/pom.xml +++ b/pulsar-io/twitter/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar-io 3.1.0-SNAPSHOT - pulsar-io-twitter Pulsar IO :: Twitter - - ${project.groupId} 
pulsar-io-core ${project.version} - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.twitter hbc-core ${hbc-core.version} - org.apache.commons commons-collections4 - org.apache.commons commons-lang3 - - ${project.groupId} - pulsar-io-common - ${project.version} + ${project.groupId} + pulsar-io-common + ${project.version} - - @@ -80,5 +69,4 @@ - diff --git a/pulsar-metadata/pom.xml b/pulsar-metadata/pom.xml index a494fa1c35f8f..7d5db0f2018c1 100644 --- a/pulsar-metadata/pom.xml +++ b/pulsar-metadata/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - pulsar-metadata Pulsar Metadata - org.apache.pulsar + io.streamnative pulsar-common ${project.version} - org.apache.zookeeper zookeeper @@ -56,56 +53,69 @@ - - io.dropwizard.metrics metrics-core test - - + + org.apache.zookeeper + zookeeper + tests + test + + + ch.qos.logback + logback-core + + + ch.qos.logback + logback-classic + + + io.netty + netty-tcnative + + + + + ${project.groupId} + testmocks + ${project.version} + test + org.xerial.snappy snappy-java test - org.awaitility awaitility test - org.apache.bookkeeper bookkeeper-server - io.etcd jetcd-core - - io.etcd jetcd-test test - com.github.ben-manes.caffeine caffeine - io.prometheus simpleclient - - @@ -125,7 +135,6 @@ - org.apache.maven.plugins maven-jar-plugin diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java index a33c8761cba9e..4db7f4798c309 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractHierarchicalLedgerManager.java @@ -206,10 +206,11 @@ protected void asyncProcessLedgersInSingleNode( mcb = new 
BookkeeperInternalCallbacks.MultiCallback(activeLedgers.size(), finalCb, ctx, successRc, failureRc); // start loop over all ledgers - for (Long ledger : activeLedgers) { - processor.process(ledger, mcb); - } - + scheduler.submit(() -> { + for (Long ledger : activeLedgers) { + processor.process(ledger, mcb); + } + }); }).exceptionally(ex -> { finalCb.processResult(failureRc, null, ctx); return null; diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractMetadataDriver.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractMetadataDriver.java index cc5f759c73fe5..435f94b05dc2b 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractMetadataDriver.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/AbstractMetadataDriver.java @@ -21,6 +21,7 @@ import java.io.Closeable; import java.io.IOException; import java.net.URI; +import java.util.concurrent.TimeUnit; import lombok.SneakyThrows; import org.apache.bookkeeper.conf.AbstractConfiguration; import org.apache.bookkeeper.discover.RegistrationClient; @@ -40,6 +41,7 @@ public abstract class AbstractMetadataDriver implements Closeable { public static final String METADATA_STORE_SCHEME = "metadata-store"; public static final String METADATA_STORE_INSTANCE = "metadata-store-instance"; + public static final long BLOCKING_CALL_TIMEOUT = TimeUnit.SECONDS.toMillis(30); protected MetadataStoreExtended store; private boolean storeInstanceIsOwned; diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LegacyHierarchicalLedgerRangeIterator.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LegacyHierarchicalLedgerRangeIterator.java index 15b1d561f901c..37e6dc836f254 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LegacyHierarchicalLedgerRangeIterator.java +++ 
b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LegacyHierarchicalLedgerRangeIterator.java @@ -18,17 +18,21 @@ */ package org.apache.pulsar.metadata.bookkeeper; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.apache.pulsar.metadata.bookkeeper.AbstractMetadataDriver.BLOCKING_CALL_TIMEOUT; import java.io.IOException; import java.util.Iterator; import java.util.List; import java.util.NavigableSet; import java.util.NoSuchElementException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.meta.LedgerManager; import org.apache.bookkeeper.util.StringUtils; import org.apache.pulsar.metadata.api.MetadataStore; + /** * Hierarchical Ledger Manager which manages ledger meta in zookeeper using 2-level hierarchical znodes. * @@ -67,7 +71,7 @@ public LegacyHierarchicalLedgerRangeIterator(MetadataStore store, String ledgers * @return false if have visited all level1 nodes * @throws InterruptedException/KeeperException if error occurs reading zookeeper children */ - private boolean nextL1Node() throws ExecutionException, InterruptedException { + private boolean nextL1Node() throws ExecutionException, InterruptedException, TimeoutException { l2NodesIter = null; while (l2NodesIter == null) { if (l1NodesIter.hasNext()) { @@ -79,7 +83,8 @@ private boolean nextL1Node() throws ExecutionException, InterruptedException { if (!isLedgerParentNode(curL1Nodes)) { continue; } - List l2Nodes = store.getChildren(ledgersRoot + "/" + curL1Nodes).get(); + List l2Nodes = store.getChildren(ledgersRoot + "/" + curL1Nodes) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); l2NodesIter = l2Nodes.iterator(); if (!l2NodesIter.hasNext()) { l2NodesIter = null; @@ -94,7 +99,8 @@ private synchronized void preload() throws IOException { boolean hasMoreElements = false; try { if (l1NodesIter == null) { - List l1Nodes = store.getChildren(ledgersRoot).get(); + 
List l1Nodes = store.getChildren(ledgersRoot) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); l1NodesIter = l1Nodes.iterator(); hasMoreElements = nextL1Node(); } else if (l2NodesIter == null || !l2NodesIter.hasNext()) { @@ -102,7 +108,7 @@ private synchronized void preload() throws IOException { } else { hasMoreElements = true; } - } catch (ExecutionException ke) { + } catch (ExecutionException | TimeoutException ke) { throw new IOException("Error preloading next range", ke); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -156,8 +162,8 @@ LedgerManager.LedgerRange getLedgerRangeByLevel(final String level1, final Strin String nodePath = nodeBuilder.toString(); List ledgerNodes = null; try { - ledgerNodes = store.getChildren(nodePath).get(); - } catch (ExecutionException e) { + ledgerNodes = store.getChildren(nodePath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + } catch (ExecutionException | TimeoutException e) { throw new IOException("Error when get child nodes from zk", e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LongHierarchicalLedgerRangeIterator.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LongHierarchicalLedgerRangeIterator.java index 9a36ac53b8991..3b32916e6e7a9 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LongHierarchicalLedgerRangeIterator.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/LongHierarchicalLedgerRangeIterator.java @@ -24,6 +24,8 @@ import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.meta.LedgerManager; import org.apache.bookkeeper.util.StringUtils; @@ -57,8 +59,9 @@ class LongHierarchicalLedgerRangeIterator 
implements LedgerManager.LedgerRangeIt */ List getChildrenAt(String path) throws IOException { try { - return store.getChildren(path).get(); - } catch (ExecutionException e) { + return store.getChildren(path) + .get(AbstractMetadataDriver.BLOCKING_CALL_TIMEOUT, TimeUnit.MILLISECONDS); + } catch (ExecutionException | TimeoutException e) { if (log.isDebugEnabled()) { log.debug("Failed to get children at {}", path); } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java index a4336b876398a..ee06930b3c880 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLayoutManager.java @@ -30,7 +30,7 @@ import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; -class PulsarLayoutManager implements LayoutManager { +public class PulsarLayoutManager implements LayoutManager { @Getter(AccessLevel.PACKAGE) private final MetadataStoreExtended store; @@ -40,7 +40,7 @@ class PulsarLayoutManager implements LayoutManager { private final String layoutPath; - PulsarLayoutManager(MetadataStoreExtended store, String ledgersRootPath) { + public PulsarLayoutManager(MetadataStoreExtended store, String ledgersRootPath) { this.ledgersRootPath = ledgersRootPath; this.store = store; this.layoutPath = ledgersRootPath + "/" + BookKeeperConstants.LAYOUT_ZNODE; diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java index bc35380fec19c..44870ed47f05b 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java +++ 
b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerAuditorManager.java @@ -27,17 +27,19 @@ import org.apache.pulsar.metadata.api.coordination.LeaderElection; import org.apache.pulsar.metadata.api.coordination.LeaderElectionState; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.api.extended.SessionEvent; import org.apache.pulsar.metadata.coordination.impl.CoordinationServiceImpl; @Slf4j -class PulsarLedgerAuditorManager implements LedgerAuditorManager { +public class PulsarLedgerAuditorManager implements LedgerAuditorManager { - private static final String ELECTION_PATH = "leader"; + public static final String ELECTION_PATH = "leader"; private final CoordinationService coordinationService; private final LeaderElection leaderElection; private LeaderElectionState leaderElectionState; private String bookieId; + private boolean sessionExpired = false; PulsarLedgerAuditorManager(MetadataStoreExtended store, String ledgersRoot) { this.coordinationService = new CoordinationServiceImpl(store); @@ -47,6 +49,14 @@ class PulsarLedgerAuditorManager implements LedgerAuditorManager { this.leaderElection = coordinationService.getLeaderElection(String.class, electionPath, this::handleStateChanges); this.leaderElectionState = LeaderElectionState.NoLeader; + store.registerSessionListener(event -> { + if (SessionEvent.SessionLost == event) { + synchronized (this) { + sessionExpired = true; + notifyAll(); + } + } + }); } private void handleStateChanges(LeaderElectionState state) { @@ -71,6 +81,9 @@ public void tryToBecomeAuditor(String bookieId, Consumer listener) while (true) { try { synchronized (this) { + if (sessionExpired) { + throw new IllegalStateException("Zookeeper session expired, give up to become auditor."); + } if (leaderElectionState == LeaderElectionState.Leading) { return; } else { diff --git 
a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java index 59452a3d54db3..b003c656353c0 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManager.java @@ -377,7 +377,7 @@ public void close() throws IOException { } } - private String getLedgerPath(long ledgerId) { + public String getLedgerPath(long ledgerId) { return this.ledgerRootPath + StringUtils.getHybridHierarchicalLedgerPath(ledgerId); } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManagerFactory.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManagerFactory.java index 1b229757c9c30..bfcbf0b22d924 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManagerFactory.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerManagerFactory.java @@ -19,8 +19,12 @@ package org.apache.pulsar.metadata.bookkeeper; import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.pulsar.metadata.bookkeeper.AbstractMetadataDriver.BLOCKING_CALL_TIMEOUT; import java.io.IOException; import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.conf.AbstractConfiguration; @@ -110,7 +114,13 @@ public boolean validateAndNukeExistingCluster(AbstractConfiguration conf, * before proceeding with nuking existing cluster, make sure there * are no unexpected nodes under ledgersRootPath */ - List ledgersRootPathChildrenList = store.getChildren(ledgerRootPath).join(); + final List 
ledgersRootPathChildrenList; + try { + ledgersRootPathChildrenList = store.getChildren(ledgerRootPath) + .get(BLOCKING_CALL_TIMEOUT, TimeUnit.MILLISECONDS); + } catch (ExecutionException | TimeoutException e) { + throw new IOException(e); + } for (String ledgersRootPathChildren : ledgersRootPathChildrenList) { if ((!AbstractZkLedgerManager.isSpecialZnode(ledgersRootPathChildren)) && (!ledgerManager.isLedgerParentNode(ledgersRootPathChildren))) { @@ -124,18 +134,34 @@ public boolean validateAndNukeExistingCluster(AbstractConfiguration conf, format(conf, layoutManager); // now delete all the special nodes recursively - for (String ledgersRootPathChildren : store.getChildren(ledgerRootPath).join()) { - if (AbstractZkLedgerManager.isSpecialZnode(ledgersRootPathChildren)) { - store.deleteRecursive(ledgerRootPath + "/" + ledgersRootPathChildren).join(); + final List ledgersRootPathChildren; + try { + ledgersRootPathChildren = store.getChildren(ledgerRootPath) + .get(BLOCKING_CALL_TIMEOUT, TimeUnit.MILLISECONDS); + } catch (ExecutionException | TimeoutException e) { + throw new IOException(e); + } + for (String ledgersRootPathChild :ledgersRootPathChildren) { + if (AbstractZkLedgerManager.isSpecialZnode(ledgersRootPathChild)) { + try { + store.deleteRecursive(ledgerRootPath + "/" + ledgersRootPathChild) + .get(BLOCKING_CALL_TIMEOUT, TimeUnit.MILLISECONDS); + } catch (ExecutionException | TimeoutException e) { + throw new IOException(e); + } } else { log.error("Found unexpected node : {} under ledgersRootPath : {} so exiting nuke operation", - ledgersRootPathChildren, ledgerRootPath); + ledgersRootPathChild, ledgerRootPath); return false; } } // finally deleting the ledgers rootpath - store.deleteRecursive(ledgerRootPath).join(); + try { + store.deleteRecursive(ledgerRootPath).get(BLOCKING_CALL_TIMEOUT, TimeUnit.MILLISECONDS); + } catch (ExecutionException | TimeoutException e) { + throw new IOException(e); + } log.info("Successfully nuked existing cluster"); return true; 
diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java index cf8ff208c94b6..2673328b81139 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarLedgerUnderreplicationManager.java @@ -19,16 +19,20 @@ package org.apache.pulsar.metadata.bookkeeper; import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.bookkeeper.proto.DataFormats.CheckAllLedgersFormat; import static org.apache.bookkeeper.proto.DataFormats.LedgerRereplicationLayoutFormat; import static org.apache.bookkeeper.proto.DataFormats.LockDataFormat; import static org.apache.bookkeeper.proto.DataFormats.PlacementPolicyCheckFormat; import static org.apache.bookkeeper.proto.DataFormats.ReplicasCheckFormat; import static org.apache.bookkeeper.proto.DataFormats.UnderreplicatedLedgerFormat; +import static org.apache.pulsar.metadata.bookkeeper.AbstractMetadataDriver.BLOCKING_CALL_TIMEOUT; +import com.google.common.base.Joiner; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.TextFormat; import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -41,6 +45,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import java.util.function.Predicate; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -61,6 +66,8 @@ import org.apache.pulsar.metadata.api.NotificationType; import 
org.apache.pulsar.metadata.api.extended.CreateOption; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.KeeperException; @Slf4j public class PulsarLedgerUnderreplicationManager implements LedgerUnderreplicationManager { @@ -98,14 +105,17 @@ long getLedgerNodeVersion() { private final String urLockPath; private final String layoutPath; private final String lostBookieRecoveryDelayPath; + private final String replicationDisablePath; private final String checkAllLedgersCtimePath; private final String placementPolicyCheckCtimePath; private final String replicasCheckCtimePath; private final MetadataStoreExtended store; - private BookkeeperInternalCallbacks.GenericCallback replicationEnabledListener; - private BookkeeperInternalCallbacks.GenericCallback lostBookieRecoveryDelayListener; + private final List> replicationEnabledCallbacks = + new ArrayList<>(); + private final List> lostBookieRecoveryDelayCallbacks = + new ArrayList<>(); private static class PulsarUnderreplicatedLedger extends UnderreplicatedLedger { PulsarUnderreplicatedLedger(long ledgerId) { @@ -132,6 +142,7 @@ public PulsarLedgerUnderreplicationManager(AbstractConfiguration conf, Metada urLedgerPath = basePath + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; urLockPath = basePath + '/' + BookKeeperConstants.UNDER_REPLICATION_LOCK; lostBookieRecoveryDelayPath = basePath + '/' + BookKeeperConstants.LOSTBOOKIERECOVERYDELAY_NODE; + replicationDisablePath = basePath + '/' + BookKeeperConstants.DISABLE_NODE; checkAllLedgersCtimePath = basePath + '/' + BookKeeperConstants.CHECK_ALL_LEDGERS_CTIME; placementPolicyCheckCtimePath = basePath + '/' + BookKeeperConstants.PLACEMENT_POLICY_CHECK_CTIME; replicasCheckCtimePath = basePath + '/' + BookKeeperConstants.REPLICAS_CHECK_CTIME; @@ -225,17 +236,34 @@ private void handleNotification(Notification n) { synchronized (this) { // Notify that there were some 
changes on the under-replicated z-nodes notifyAll(); - - if (n.getType() == NotificationType.Deleted) { - if (n.getPath().equals(basePath + '/' + BookKeeperConstants.DISABLE_NODE)) { - log.info("LedgerReplication is enabled externally through MetadataStore, " - + "since DISABLE_NODE ZNode is deleted"); - if (replicationEnabledListener != null) { - replicationEnabledListener.operationComplete(0, null); + if (lostBookieRecoveryDelayPath.equals(n.getPath())) { + final List> callbackList; + synchronized (lostBookieRecoveryDelayCallbacks) { + callbackList = new ArrayList<>(lostBookieRecoveryDelayCallbacks); + lostBookieRecoveryDelayCallbacks.clear(); + } + for (BookkeeperInternalCallbacks.GenericCallback callback : callbackList) { + try { + callback.operationComplete(0, null); + } catch (Exception e) { + log.warn("lostBookieRecoveryDelayCallbacks handle error", e); } - } else if (n.getPath().equals(lostBookieRecoveryDelayPath)) { - if (lostBookieRecoveryDelayListener != null) { - lostBookieRecoveryDelayListener.operationComplete(0, null); + } + return; + } + if (replicationDisablePath.equals(n.getPath()) && n.getType() == NotificationType.Deleted) { + log.info("LedgerReplication is enabled externally through MetadataStore, " + + "since DISABLE_NODE ZNode is deleted"); + final List> callbackList; + synchronized (replicationEnabledCallbacks) { + callbackList = new ArrayList<>(replicationEnabledCallbacks); + replicationEnabledCallbacks.clear(); + } + for (BookkeeperInternalCallbacks.GenericCallback callback : callbackList) { + try { + callback.operationComplete(0, null); + } catch (Exception e) { + log.warn("replicationEnabledCallbacks handle error", e); } } } @@ -249,7 +277,7 @@ public UnderreplicatedLedger getLedgerUnreplicationInfo(long ledgerId) try { String path = getUrLedgerPath(ledgerId); - Optional optRes = store.get(path).get(); + Optional optRes = store.get(path).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (!optRes.isPresent()) { if (log.isDebugEnabled()) { 
log.debug("Ledger: {} is not marked underreplicated", ledgerId); @@ -270,7 +298,7 @@ public UnderreplicatedLedger getLedgerUnreplicationInfo(long ledgerId) underreplicatedLedger.setCtime(ctime); underreplicatedLedger.setReplicaList(replicaList); return underreplicatedLedger; - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { throw new ReplicationException.UnavailableException("Error contacting with metadata store", ee); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -374,14 +402,16 @@ private void handleLedgerUnderreplicatedAlreadyMarked(final String path, public void acquireUnderreplicatedLedger(long ledgerId) throws ReplicationException { try { internalAcquireUnderreplicatedLedger(ledgerId); - } catch (ExecutionException | InterruptedException e) { + } catch (ExecutionException | TimeoutException | InterruptedException e) { throw new ReplicationException.UnavailableException("Failed to acuire under-replicated ledger", e); } } - private void internalAcquireUnderreplicatedLedger(long ledgerId) throws ExecutionException, InterruptedException { + private void internalAcquireUnderreplicatedLedger(long ledgerId) throws ExecutionException, + InterruptedException, TimeoutException { String lockPath = getUrLedgerLockPath(urLockPath, ledgerId); - store.put(lockPath, LOCK_DATA, Optional.of(-1L), EnumSet.of(CreateOption.Ephemeral)).get(); + store.put(lockPath, LOCK_DATA, Optional.of(-1L), EnumSet.of(CreateOption.Ephemeral)) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } @Override @@ -392,7 +422,34 @@ public void markLedgerReplicated(long ledgerId) throws ReplicationException.Unav try { Lock l = heldLocks.get(ledgerId); if (l != null) { - store.delete(getUrLedgerPath(ledgerId), Optional.of(l.getLedgerNodeVersion())).get(); + store.delete(getUrLedgerPath(ledgerId), Optional.of(l.getLedgerNodeVersion())) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + if (store instanceof ZKMetadataStore) { + try { + // clean up 
the hierarchy + String[] parts = getUrLedgerPath(ledgerId).split("/"); + for (int i = 1; i <= 4; i++) { + String[] p = Arrays.copyOf(parts, parts.length - i); + String path = Joiner.on("/").join(p); + Optional getResult = store.get(path).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + if (getResult.isPresent()) { + store.delete(path, Optional.of(getResult.get().getStat().getVersion())) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + } + } + } catch (ExecutionException ee) { + // This can happen when cleaning up the hierarchy. + // It's safe to ignore, it simply means another + // ledger in the same hierarchy has been marked as + // underreplicated. + if (ee.getCause() instanceof MetadataStoreException && ee.getCause().getCause() + instanceof KeeperException.NotEmptyException) { + //do nothing. + } else { + log.warn("Error deleting underrepcalited ledger parent node", ee); + } + } + } } } catch (ExecutionException ee) { if (ee.getCause() instanceof MetadataStoreException.NotFoundException) { @@ -405,6 +462,8 @@ public void markLedgerReplicated(long ledgerId) throws ReplicationException.Unav log.error("Error deleting underreplicated ledger node", ee); throw new ReplicationException.UnavailableException("Error contacting metadata store", ee); } + } catch (TimeoutException ex) { + throw new ReplicationException.UnavailableException("Error contacting metadata store", ex); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new ReplicationException.UnavailableException("Interrupted while contacting metadata store", ie); @@ -445,7 +504,7 @@ public boolean hasNext() { while (queue.size() > 0 && curBatch.size() == 0) { String parent = queue.remove(); try { - for (String c : store.getChildren(parent).get()) { + for (String c : store.getChildren(parent).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { String child = parent + "/" + c; if (c.startsWith("urL")) { long ledgerId = getLedgerId(child); @@ -479,21 +538,23 @@ public UnderreplicatedLedger next() { } 
private long getLedgerToRereplicateFromHierarchy(String parent, long depth) - throws ExecutionException, InterruptedException { + throws ExecutionException, InterruptedException, TimeoutException { if (depth == 4) { - List children = new ArrayList<>(store.getChildren(parent).get()); + List children = new ArrayList<>(store.getChildren(parent) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)); Collections.shuffle(children); while (!children.isEmpty()) { String tryChild = children.get(0); try { - List locks = store.getChildren(urLockPath).get(); + List locks = store.getChildren(urLockPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (locks.contains(tryChild)) { children.remove(tryChild); continue; } - Optional optRes = store.get(parent + "/" + tryChild).get(); + Optional optRes = store.get(parent + "/" + tryChild) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (!optRes.isPresent()) { if (log.isDebugEnabled()) { log.debug("{}/{} doesn't exist", parent, tryChild); @@ -522,7 +583,7 @@ private long getLedgerToRereplicateFromHierarchy(String parent, long depth) return -1; } - List children = new ArrayList<>(store.getChildren(parent).join()); + List children = new ArrayList<>(store.getChildren(parent).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)); Collections.shuffle(children); while (children.size() > 0) { @@ -545,7 +606,7 @@ public long pollLedgerToRereplicate() throws ReplicationException.UnavailableExc } try { return getLedgerToRereplicateFromHierarchy(urLedgerPath, 0); - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { throw new ReplicationException.UnavailableException("Error contacting metadata store", ee); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -571,7 +632,7 @@ public long getLedgerToRereplicate() throws ReplicationException.UnavailableExce // nothing found, wait for a watcher to trigger this.wait(1000); } - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { 
throw new ReplicationException.UnavailableException("Error contacting metadata store", ee); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -597,7 +658,8 @@ public void releaseUnderreplicatedLedger(long ledgerId) throws ReplicationExcept try { Lock l = heldLocks.get(ledgerId); if (l != null) { - store.delete(l.getLockPath(), Optional.empty()).get(); + store.delete(l.getLockPath(), Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } } catch (ExecutionException ee) { if (ee.getCause() instanceof MetadataStoreException.NotFoundException) { @@ -606,6 +668,8 @@ public void releaseUnderreplicatedLedger(long ledgerId) throws ReplicationExcept log.error("Error deleting underreplicated ledger lock", ee); throw new ReplicationException.UnavailableException("Error contacting metadata store", ee); } + } catch (TimeoutException ex) { + throw new ReplicationException.UnavailableException("Error contacting metadata store", ex); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new ReplicationException.UnavailableException("Interrupted while connecting metadata store", ie); @@ -620,7 +684,8 @@ public void close() throws ReplicationException.UnavailableException { } try { for (Map.Entry e : heldLocks.entrySet()) { - store.delete(e.getValue().getLockPath(), Optional.empty()).get(); + store.delete(e.getValue().getLockPath(), Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } } catch (ExecutionException ee) { if (ee.getCause() instanceof MetadataStoreException.NotFoundException) { @@ -629,6 +694,8 @@ public void close() throws ReplicationException.UnavailableException { log.error("Error deleting underreplicated ledger lock", ee); throw new ReplicationException.UnavailableException("Error contacting metadata store", ee); } + } catch (TimeoutException ex) { + throw new ReplicationException.UnavailableException("Error contacting metadata store", ex); } catch (InterruptedException ie) { 
Thread.currentThread().interrupt(); throw new ReplicationException.UnavailableException("Interrupted while connecting metadata store", ie); @@ -642,10 +709,10 @@ public void disableLedgerReplication() log.debug("disableLedegerReplication()"); } try { - String path = basePath + '/' + BookKeeperConstants.DISABLE_NODE; - store.put(path, "".getBytes(UTF_8), Optional.of(-1L)).get(); + store.put(replicationDisablePath, "".getBytes(UTF_8), Optional.of(-1L)) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); log.info("Auto ledger re-replication is disabled!"); - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { log.error("Exception while stopping auto ledger re-replication", ee); throw new ReplicationException.UnavailableException( "Exception while stopping auto ledger re-replication", ee); @@ -663,9 +730,10 @@ public void enableLedgerReplication() log.debug("enableLedegerReplication()"); } try { - store.delete(basePath + '/' + BookKeeperConstants.DISABLE_NODE, Optional.empty()).get(); + store.delete(replicationDisablePath, Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); log.info("Resuming automatic ledger re-replication"); - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { log.error("Exception while resuming ledger replication", ee); throw new ReplicationException.UnavailableException( "Exception while resuming auto ledger re-replication", ee); @@ -683,8 +751,9 @@ public boolean isLedgerReplicationEnabled() log.debug("isLedgerReplicationEnabled()"); } try { - return !store.exists(basePath + '/' + BookKeeperConstants.DISABLE_NODE).get(); - } catch (ExecutionException ee) { + return !store.exists(replicationDisablePath) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + } catch (ExecutionException | TimeoutException ee) { log.error("Error while checking the state of " + "ledger re-replication", ee); throw new ReplicationException.UnavailableException( @@ -702,19 +771,18 @@ public void 
notifyLedgerReplicationEnabled(final BookkeeperInternalCallbacks.Gen if (log.isDebugEnabled()) { log.debug("notifyLedgerReplicationEnabled()"); } - - synchronized (this) { - replicationEnabledListener = cb; + synchronized (replicationEnabledCallbacks) { + replicationEnabledCallbacks.add(cb); } - try { - if (!store.exists(basePath + '/' + BookKeeperConstants.DISABLE_NODE).get()) { + if (!store.exists(replicationDisablePath) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { log.info("LedgerReplication is enabled externally through metadata store, " + "since DISABLE_NODE node is deleted"); cb.operationComplete(0, null); return; } - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { log.error("Error while checking the state of " + "ledger re-replication", ee); throw new ReplicationException.UnavailableException( @@ -732,7 +800,7 @@ public void notifyLedgerReplicationEnabled(final BookkeeperInternalCallbacks.Gen @Override public boolean isLedgerBeingReplicated(long ledgerId) throws ReplicationException { try { - return store.exists(getUrLedgerLockPath(urLockPath, ledgerId)).get(); + return store.exists(getUrLedgerLockPath(urLockPath, ledgerId)).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } catch (Exception e) { throw new ReplicationException.UnavailableException("Failed to check if ledger is beinge replicated", e); } @@ -744,7 +812,7 @@ public boolean initializeLostBookieRecoveryDelay(int lostBookieRecoveryDelay) th log.debug("initializeLostBookieRecoveryDelay()"); try { store.put(lostBookieRecoveryDelayPath, Integer.toString(lostBookieRecoveryDelay).getBytes(UTF_8), - Optional.of(-1L)).get(); + Optional.of(-1L)).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } catch (ExecutionException ee) { if (ee.getCause() instanceof MetadataStoreException.BadVersionException) { log.info("lostBookieRecoveryDelay node is already present, so using " @@ -754,6 +822,9 @@ public boolean initializeLostBookieRecoveryDelay(int lostBookieRecoveryDelay) th 
log.error("Error while initializing LostBookieRecoveryDelay", ee); throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } + } catch (TimeoutException ex) { + log.error("Error while initializing LostBookieRecoveryDelay", ex); + throw new ReplicationException.UnavailableException("Error contacting zookeeper", ex); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new ReplicationException.UnavailableException("Interrupted while contacting zookeeper", ie); @@ -767,9 +838,9 @@ public void setLostBookieRecoveryDelay(int lostBookieRecoveryDelay) throws log.debug("setLostBookieRecoveryDelay()"); try { store.put(lostBookieRecoveryDelayPath, Integer.toString(lostBookieRecoveryDelay).getBytes(UTF_8), - Optional.empty()).get(); + Optional.empty()).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { log.error("Error while setting LostBookieRecoveryDelay ", ee); throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } catch (InterruptedException ie) { @@ -782,9 +853,10 @@ public void setLostBookieRecoveryDelay(int lostBookieRecoveryDelay) throws public int getLostBookieRecoveryDelay() throws ReplicationException.UnavailableException { log.debug("getLostBookieRecoveryDelay()"); try { - byte[] data = store.get(lostBookieRecoveryDelayPath).get().get().getValue(); + byte[] data = store.get(lostBookieRecoveryDelayPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS) + .get().getValue(); return Integer.parseInt(new String(data, UTF_8)); - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { log.error("Error while getting LostBookieRecoveryDelay ", ee); throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } catch (InterruptedException ie) { @@ -797,16 +869,16 @@ public int getLostBookieRecoveryDelay() throws ReplicationException.UnavailableE public 
void notifyLostBookieRecoveryDelayChanged(BookkeeperInternalCallbacks.GenericCallback cb) throws ReplicationException.UnavailableException { log.debug("notifyLostBookieRecoveryDelayChanged()"); - synchronized (this) { - lostBookieRecoveryDelayListener = cb; + synchronized (lostBookieRecoveryDelayCallbacks) { + lostBookieRecoveryDelayCallbacks.add(cb); } try { - if (!store.exists(lostBookieRecoveryDelayPath).get()) { + if (!store.exists(lostBookieRecoveryDelayPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { cb.operationComplete(0, null); return; } - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { log.error("Error while checking the state of lostBookieRecoveryDelay", ee); throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } catch (InterruptedException ie) { @@ -820,7 +892,8 @@ public String getReplicationWorkerIdRereplicatingLedger(long ledgerId) throws ReplicationException.UnavailableException { try { - Optional optRes = store.get(getUrLedgerLockPath(urLockPath, ledgerId)).get(); + Optional optRes = store.get(getUrLedgerLockPath(urLockPath, ledgerId)) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (!optRes.isPresent()) { // this is ok. 
return null; @@ -831,7 +904,7 @@ public String getReplicationWorkerIdRereplicatingLedger(long ledgerId) TextFormat.merge(new String(lockData, UTF_8), lockDataBuilder); LockDataFormat lock = lockDataBuilder.build(); return lock.getBookieId(); - } catch (ExecutionException e) { + } catch (ExecutionException | TimeoutException e) { log.error("Error while getting ReplicationWorkerId rereplicating Ledger", e); throw new ReplicationException.UnavailableException( "Error while getting ReplicationWorkerId rereplicating Ledger", e); @@ -855,8 +928,9 @@ public void setCheckAllLedgersCTime(long checkAllLedgersCTime) throws Replicatio builder.setCheckAllLedgersCTime(checkAllLedgersCTime); byte[] checkAllLedgersFormatByteArray = builder.build().toByteArray(); - store.put(checkAllLedgersCtimePath, checkAllLedgersFormatByteArray, Optional.empty()).get(); - } catch (ExecutionException ee) { + store.put(checkAllLedgersCtimePath, checkAllLedgersFormatByteArray, Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + } catch (ExecutionException | TimeoutException ee) { throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -870,7 +944,7 @@ public long getCheckAllLedgersCTime() throws ReplicationException.UnavailableExc log.debug("setCheckAllLedgersCTime"); } try { - Optional optRes = store.get(checkAllLedgersCtimePath).get(); + Optional optRes = store.get(checkAllLedgersCtimePath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (!optRes.isPresent()) { log.warn("checkAllLedgersCtimeZnode is not yet available"); return -1; @@ -879,7 +953,7 @@ public long getCheckAllLedgersCTime() throws ReplicationException.UnavailableExc CheckAllLedgersFormat checkAllLedgersFormat = CheckAllLedgersFormat.parseFrom(data); return checkAllLedgersFormat.hasCheckAllLedgersCTime() ? 
checkAllLedgersFormat.getCheckAllLedgersCTime() : -1; - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -899,8 +973,9 @@ public void setPlacementPolicyCheckCTime(long placementPolicyCheckCTime) throws PlacementPolicyCheckFormat.Builder builder = PlacementPolicyCheckFormat.newBuilder(); builder.setPlacementPolicyCheckCTime(placementPolicyCheckCTime); byte[] placementPolicyCheckFormatByteArray = builder.build().toByteArray(); - store.put(placementPolicyCheckCtimePath, placementPolicyCheckFormatByteArray, Optional.empty()).get(); - } catch (ExecutionException ke) { + store.put(placementPolicyCheckCtimePath, placementPolicyCheckFormatByteArray, Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + } catch (ExecutionException | TimeoutException ke) { throw new ReplicationException.UnavailableException("Error contacting zookeeper", ke); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -914,7 +989,8 @@ public long getPlacementPolicyCheckCTime() throws ReplicationException.Unavailab log.debug("getPlacementPolicyCheckCTime"); } try { - Optional optRes = store.get(placementPolicyCheckCtimePath).get(); + Optional optRes = store.get(placementPolicyCheckCtimePath) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (!optRes.isPresent()) { log.warn("placementPolicyCheckCtimeZnode is not yet available"); return -1; @@ -923,7 +999,7 @@ public long getPlacementPolicyCheckCTime() throws ReplicationException.Unavailab PlacementPolicyCheckFormat placementPolicyCheckFormat = PlacementPolicyCheckFormat.parseFrom(data); return placementPolicyCheckFormat.hasPlacementPolicyCheckCTime() ? 
placementPolicyCheckFormat.getPlacementPolicyCheckCTime() : -1; - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -939,11 +1015,12 @@ public void setReplicasCheckCTime(long replicasCheckCTime) throws ReplicationExc ReplicasCheckFormat.Builder builder = ReplicasCheckFormat.newBuilder(); builder.setReplicasCheckCTime(replicasCheckCTime); byte[] replicasCheckFormatByteArray = builder.build().toByteArray(); - store.put(replicasCheckCtimePath, replicasCheckFormatByteArray, Optional.empty()).get(); + store.put(replicasCheckCtimePath, replicasCheckFormatByteArray, Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (log.isDebugEnabled()) { log.debug("setReplicasCheckCTime completed successfully"); } - } catch (ExecutionException ke) { + } catch (ExecutionException | TimeoutException ke) { throw new ReplicationException.UnavailableException("Error contacting zookeeper", ke); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -954,7 +1031,8 @@ public void setReplicasCheckCTime(long replicasCheckCTime) throws ReplicationExc @Override public long getReplicasCheckCTime() throws ReplicationException.UnavailableException { try { - Optional optRes = store.get(replicasCheckCtimePath).get(); + Optional optRes = store.get(replicasCheckCtimePath) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (!optRes.isPresent()) { log.warn("placementPolicyCheckCtimeZnode is not yet available"); return -1; @@ -965,7 +1043,7 @@ public long getReplicasCheckCTime() throws ReplicationException.UnavailableExcep log.debug("getReplicasCheckCTime completed successfully"); } return replicasCheckFormat.hasReplicasCheckCTime() ? 
replicasCheckFormat.getReplicasCheckCTime() : -1; - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { throw new ReplicationException.UnavailableException("Error contacting zookeeper", ee); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -978,11 +1056,13 @@ public long getReplicasCheckCTime() throws ReplicationException.UnavailableExcep @Override public void notifyUnderReplicationLedgerChanged(BookkeeperInternalCallbacks.GenericCallback cb) throws ReplicationException.UnavailableException { - log.debug("notifyUnderReplicationLedgerChanged()"); - store.registerListener(e -> { - if (e.getType() == NotificationType.Deleted && ID_EXTRACTION_PATTERN.matcher(e.getPath()).find()) { - cb.operationComplete(0, null); - } - }); + //The store listener callback executor is metadata-store executor, + //in cb.operationComplete(0, null), it will get all underreplication ledgers from metadata-store, it's sync + //operation. So it's a deadlock. 
+// store.registerListener(e -> { +// if (e.getType() == NotificationType.Deleted && ID_EXTRACTION_PATTERN.matcher(e.getPath()).find()) { +// cb.operationComplete(0, null); +// } +// }); } } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java index 25c3f10aa18fa..c6aba6b7d93d0 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/bookkeeper/PulsarRegistrationManager.java @@ -19,10 +19,12 @@ package org.apache.pulsar.metadata.bookkeeper; import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.bookkeeper.util.BookKeeperConstants.AVAILABLE_NODE; import static org.apache.bookkeeper.util.BookKeeperConstants.COOKIE_NODE; import static org.apache.bookkeeper.util.BookKeeperConstants.INSTANCEID; import static org.apache.bookkeeper.util.BookKeeperConstants.READONLY; +import static org.apache.pulsar.metadata.bookkeeper.AbstractMetadataDriver.BLOCKING_CALL_TIMEOUT; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; @@ -32,8 +34,8 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import lombok.Cleanup; -import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.bookie.BookieException; import org.apache.bookkeeper.conf.AbstractConfiguration; @@ -85,12 +87,11 @@ public class PulsarRegistrationManager implements RegistrationManager { } @Override - @SneakyThrows public void close() { for (ResourceLock rwBookie : bookieRegistration.values()) { try { - rwBookie.release().get(); - } catch (ExecutionException ignore) { + 
rwBookie.release().get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + } catch (ExecutionException | TimeoutException ignore) { log.error("Cannot release correctly {}", rwBookie, ignore.getCause()); } catch (InterruptedException ignore) { log.error("Cannot release correctly {}", rwBookie, ignore); @@ -100,26 +101,30 @@ public void close() { for (ResourceLock roBookie : bookieRegistrationReadOnly.values()) { try { - roBookie.release().get(); - } catch (ExecutionException ignore) { + roBookie.release().get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + } catch (ExecutionException | TimeoutException ignore) { log.error("Cannot release correctly {}", roBookie, ignore.getCause()); } catch (InterruptedException ignore) { log.error("Cannot release correctly {}", roBookie, ignore); Thread.currentThread().interrupt(); } } - coordinationService.close(); + try { + coordinationService.close(); + } catch (Exception e) { + throw new RuntimeException(e); + } } @Override public String getClusterInstanceId() throws BookieException { try { return store.get(ledgersRootPath + "/" + INSTANCEID) - .get() + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS) .map(res -> new String(res.getValue(), UTF_8)) .orElseThrow( () -> new BookieException.MetadataStoreException("BookKeeper cluster not initialized")); - } catch (ExecutionException | InterruptedException e) { + } catch (ExecutionException | InterruptedException | TimeoutException e) { throw new BookieException.MetadataStoreException("Failed to get cluster instance id", e); } } @@ -136,22 +141,24 @@ public void registerBookie(BookieId bookieId, boolean readOnly, BookieServiceInf ResourceLock rwRegistration = bookieRegistration.remove(bookieId); if (rwRegistration != null) { log.info("Bookie {} was already registered as writable, unregistering", bookieId); - rwRegistration.release().get(); + rwRegistration.release().get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } bookieRegistrationReadOnly.put(bookieId, - lockManager.acquireLock(regPathReadOnly, 
bookieServiceInfo).get()); + lockManager.acquireLock(regPathReadOnly, bookieServiceInfo) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)); } else { ResourceLock roRegistration = bookieRegistrationReadOnly.remove(bookieId); if (roRegistration != null) { log.info("Bookie {} was already registered as read-only, unregistering", bookieId); - roRegistration.release().get(); + roRegistration.release().get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } bookieRegistration.put(bookieId, - lockManager.acquireLock(regPath, bookieServiceInfo).get()); + lockManager.acquireLock(regPath, bookieServiceInfo) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)); } - } catch (ExecutionException ee) { + } catch (ExecutionException | TimeoutException ee) { log.error("Exception registering ephemeral node for Bookie!", ee); // Throw an IOException back up. This will cause the Bookie // constructor to error out. Alternatively, we could do a System @@ -173,18 +180,18 @@ public void unregisterBookie(BookieId bookieId, boolean readOnly) throws BookieE if (readOnly) { ResourceLock roRegistration = bookieRegistrationReadOnly.get(bookieId); if (roRegistration != null) { - roRegistration.release().get(); + roRegistration.release().get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } } else { ResourceLock rwRegistration = bookieRegistration.get(bookieId); if (rwRegistration != null) { - rwRegistration.release().get(); + rwRegistration.release().get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new BookieException.MetadataStoreException(ie); - } catch (ExecutionException e) { + } catch (ExecutionException | TimeoutException e) { throw new BookieException.MetadataStoreException(e); } } @@ -195,8 +202,9 @@ public boolean isBookieRegistered(BookieId bookieId) throws BookieException { String readonlyRegPath = bookieReadonlyRegistrationPath + "/" + bookieId; try { - return (store.exists(regPath).get() || store.exists(readonlyRegPath).get()); - } catch 
(ExecutionException e) { + return (store.exists(regPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS) + || store.exists(readonlyRegPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)); + } catch (ExecutionException | TimeoutException e) { log.error("Exception while checking registration ephemeral nodes for BookieId: {}", bookieId, e); throw new BookieException.MetadataStoreException(e); } catch (InterruptedException e) { @@ -222,7 +230,8 @@ public void writeCookie(BookieId bookieId, Versioned cookieData) throws version = ((LongVersion) cookieData.getVersion()).getLongVersion(); } - store.put(path, cookieData.getValue(), Optional.of(version)).get(); + store.put(path, cookieData.getValue(), Optional.of(version)) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new BookieException.MetadataStoreException("Interrupted writing cookie for bookie " + bookieId, ie); @@ -232,6 +241,8 @@ public void writeCookie(BookieId bookieId, Versioned cookieData) throws } else { throw new BookieException.MetadataStoreException("Failed to write cookie for bookie " + bookieId); } + } catch (TimeoutException ex) { + throw new BookieException.MetadataStoreException("Failed to write cookie for bookie " + bookieId, ex); } } @@ -239,7 +250,7 @@ public void writeCookie(BookieId bookieId, Versioned cookieData) throws public Versioned readCookie(BookieId bookieId) throws BookieException { String path = this.cookiePath + "/" + bookieId; try { - Optional res = store.get(path).get(); + Optional res = store.get(path).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); if (!res.isPresent()) { throw new BookieException.CookieNotFoundException(bookieId.toString()); } @@ -250,7 +261,7 @@ public Versioned readCookie(BookieId bookieId) throws BookieException { } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new BookieException.MetadataStoreException(ie); - } catch (ExecutionException e) { + } catch (ExecutionException | 
TimeoutException e) { throw new BookieException.MetadataStoreException(e); } } @@ -259,7 +270,8 @@ public Versioned readCookie(BookieId bookieId) throws BookieException { public void removeCookie(BookieId bookieId, Version version) throws BookieException { String path = this.cookiePath + "/" + bookieId; try { - store.delete(path, Optional.of(((LongVersion) version).getLongVersion())).get(); + store.delete(path, Optional.of(((LongVersion) version).getLongVersion())) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new BookieException.MetadataStoreException("Interrupted deleting cookie for bookie " + bookieId, e); @@ -269,6 +281,8 @@ public void removeCookie(BookieId bookieId, Version version) throws BookieExcept } else { throw new BookieException.MetadataStoreException("Failed to delete cookie for bookie " + bookieId); } + } catch (TimeoutException ex) { + throw new BookieException.MetadataStoreException("Failed to delete cookie for bookie " + bookieId); } log.info("Removed cookie from {} for bookie {}.", cookiePath, bookieId); @@ -276,20 +290,23 @@ public void removeCookie(BookieId bookieId, Version version) throws BookieExcept @Override public boolean prepareFormat() throws Exception { - boolean ledgerRootExists = store.exists(ledgersRootPath).get(); - boolean availableNodeExists = store.exists(bookieRegistrationPath).get(); + boolean ledgerRootExists = store.exists(ledgersRootPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); + boolean availableNodeExists = store.exists(bookieRegistrationPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); // Create ledgers root node if not exists if (!ledgerRootExists) { - store.put(ledgersRootPath, new byte[0], Optional.empty()).get(); + store.put(ledgersRootPath, new byte[0], Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } // create available bookies node if not exists if (!availableNodeExists) { - store.put(bookieRegistrationPath, new byte[0], 
Optional.empty()).get(); + store.put(bookieRegistrationPath, new byte[0], Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } // create readonly bookies node if not exists - if (!store.exists(bookieReadonlyRegistrationPath).get()) { - store.put(bookieReadonlyRegistrationPath, new byte[0], Optional.empty()).get(); + if (!store.exists(bookieReadonlyRegistrationPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { + store.put(bookieReadonlyRegistrationPath, new byte[0], Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } return ledgerRootExists; @@ -301,16 +318,18 @@ public boolean initNewCluster() throws Exception { log.info("Initializing metadata for new cluster, ledger root path: {}", ledgersRootPath); - if (store.exists(instanceIdPath).get()) { + if (store.exists(instanceIdPath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { log.error("Ledger root path: {} already exists", ledgersRootPath); return false; } - store.put(ledgersRootPath, new byte[0], Optional.empty()).get(); + store.put(ledgersRootPath, new byte[0], Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); // create INSTANCEID String instanceId = UUID.randomUUID().toString(); - store.put(instanceIdPath, instanceId.getBytes(UTF_8), Optional.of(-1L)).join(); + store.put(instanceIdPath, instanceId.getBytes(UTF_8), Optional.of(-1L)) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); log.info("Successfully initiated cluster. 
ledger root path: {} instanceId: {}", ledgersRootPath, instanceId); @@ -321,23 +340,28 @@ public boolean initNewCluster() throws Exception { public boolean format() throws Exception { // Clear underreplicated ledgers store.deleteRecursive(PulsarLedgerUnderreplicationManager.getBasePath(ledgersRootPath) - + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH).get(); + + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); // Clear underreplicatedledger locks - store.deleteRecursive(PulsarLedgerUnderreplicationManager.getUrLockPath(ledgersRootPath)).get(); + store.deleteRecursive(PulsarLedgerUnderreplicationManager.getUrLockPath(ledgersRootPath)) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); // Clear the cookies - store.deleteRecursive(cookiePath).get(); + store.deleteRecursive(cookiePath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); // Clear the INSTANCEID - if (store.exists(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID).get()) { - store.delete(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID, Optional.empty()).get(); + if (store.exists(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { + store.delete(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID, Optional.empty()) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); } // create INSTANCEID String instanceId = UUID.randomUUID().toString(); store.put(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID, - instanceId.getBytes(StandardCharsets.UTF_8), Optional.of(-1L)).get(); + instanceId.getBytes(StandardCharsets.UTF_8), Optional.of(-1L)) + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS); log.info("Successfully formatted BookKeeper metadata"); return true; @@ -347,7 +371,7 @@ public boolean format() throws Exception { public boolean nukeExistingCluster() throws Exception { log.info("Nuking metadata of existing cluster, ledger root path: {}", ledgersRootPath); - if (!store.exists(ledgersRootPath + "/" + INSTANCEID).join()) 
{ + if (!store.exists(ledgersRootPath + "/" + INSTANCEID).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) { log.info("There is no existing cluster with ledgersRootPath: {}, so exiting nuke operation", ledgersRootPath); return true; @@ -356,17 +380,19 @@ public boolean nukeExistingCluster() throws Exception { @Cleanup RegistrationClient registrationClient = new PulsarRegistrationClient(store, ledgersRootPath); - Collection rwBookies = registrationClient.getWritableBookies().join().getValue(); + Collection rwBookies = registrationClient.getWritableBookies() + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS).getValue(); if (rwBookies != null && !rwBookies.isEmpty()) { log.error("Bookies are still up and connected to this cluster, " - + "stop all bookies before nuking the cluster"); + + "stop all bookies before nuking the cluster"); return false; } - Collection roBookies = registrationClient.getReadOnlyBookies().join().getValue(); + Collection roBookies = registrationClient.getReadOnlyBookies() + .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS).getValue(); if (roBookies != null && !roBookies.isEmpty()) { log.error("Readonly Bookies are still up and connected to this cluster, " - + "stop all bookies before nuking the cluster"); + + "stop all bookies before nuking the cluster"); return false; } diff --git a/pulsar-metadata/src/main/resources/findbugsExclude.xml b/pulsar-metadata/src/main/resources/findbugsExclude.xml index 21578b9f8cb5c..d6eb8623eb73e 100644 --- a/pulsar-metadata/src/main/resources/findbugsExclude.xml +++ b/pulsar-metadata/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - - - - + + + + + + + + + + + + diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieCheckTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieCheckTaskTest.java new file mode 100644 index 0000000000000..d30af4de6a803 --- /dev/null +++ 
b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieCheckTaskTest.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.testng.AssertJUnit.assertTrue; +import com.beust.jcommander.internal.Lists; +import com.beust.jcommander.internal.Sets; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import 
org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.stats.OpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.versioning.LongVersion; +import org.apache.bookkeeper.versioning.Versioned; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorBookieCheckTask}. + */ +public class AuditorBookieCheckTaskTest { + + private AuditorStats auditorStats; + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager underreplicationManager; + private BookieLedgerIndexer ledgerIndexer; + private AuditorBookieCheckTask bookieCheckTask; + private final AtomicBoolean shutdownCompleted = new AtomicBoolean(false); + private final AuditorTask.ShutdownTaskHandler shutdownTaskHandler = () -> shutdownCompleted.set(true); + private long startLedgerId = 0; + + @BeforeMethod + public void setup() { + ServerConfiguration conf = mock(ServerConfiguration.class); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + this.auditorStats = spy(auditorStats); + admin = mock(BookKeeperAdmin.class); + ledgerManager = mock(LedgerManager.class); + underreplicationManager = mock(LedgerUnderreplicationManager.class); + ledgerIndexer = mock(BookieLedgerIndexer.class); + AuditorBookieCheckTask bookieCheckTask1 = new AuditorBookieCheckTask( + conf, this.auditorStats, admin, ledgerManager, underreplicationManager, + shutdownTaskHandler, ledgerIndexer, null, null); + bookieCheckTask = spy(bookieCheckTask1); + } + + @Test + public void testShutdownAuditBookiesException() + throws BKException, 
ReplicationException.BKAuditException, InterruptedException { + doThrow(new ReplicationException.BKAuditException("test failed")) + .when(bookieCheckTask) + .auditBookies(); + bookieCheckTask.startAudit(true); + + assertTrue("shutdownTaskHandler should be execute.", shutdownCompleted.get()); + } + + @Test + public void testAuditBookies() + throws ReplicationException.UnavailableException, ReplicationException.BKAuditException, BKException { + final String bookieId1 = "127.0.0.1:1000"; + final String bookieId2 = "127.0.0.1:1001"; + final long bookie1LedgersCount = 10; + final long bookie2LedgersCount = 20; + + final Map> bookiesAndLedgers = new HashMap<>(); + bookiesAndLedgers.put(bookieId1, getLedgers(bookie1LedgersCount)); + bookiesAndLedgers.put(bookieId2, getLedgers(bookie2LedgersCount)); + when(ledgerIndexer.getBookieToLedgerIndex()).thenReturn(bookiesAndLedgers); + when(underreplicationManager.isLedgerReplicationEnabled()).thenReturn(true); + + CompletableFuture> metaPromise = new CompletableFuture<>(); + final LongVersion version = mock(LongVersion.class); + final LedgerMetadata metadata = mock(LedgerMetadata.class); + metaPromise.complete(new Versioned<>(metadata, version)); + when(ledgerManager.readLedgerMetadata(anyLong())).thenReturn(metaPromise); + + CompletableFuture markPromise = new CompletableFuture<>(); + markPromise.complete(null); + when(underreplicationManager.markLedgerUnderreplicatedAsync(anyLong(), anyCollection())) + .thenReturn(markPromise); + + OpStatsLogger numUnderReplicatedLedgerStats = mock(OpStatsLogger.class); + when(auditorStats.getNumUnderReplicatedLedger()).thenReturn(numUnderReplicatedLedgerStats); + + final List availableBookies = Lists.newArrayList(); + final List readOnlyBookies = Lists.newArrayList(); + // test bookie1 lost + availableBookies.add(BookieId.parse(bookieId2)); + when(admin.getAvailableBookies()).thenReturn(availableBookies); + when(admin.getReadOnlyBookies()).thenReturn(readOnlyBookies); + 
bookieCheckTask.startAudit(true); + verify(numUnderReplicatedLedgerStats, times(1)) + .registerSuccessfulValue(eq(bookie1LedgersCount)); + + // test bookie2 lost + numUnderReplicatedLedgerStats = mock(OpStatsLogger.class); + when(auditorStats.getNumUnderReplicatedLedger()).thenReturn(numUnderReplicatedLedgerStats); + availableBookies.clear(); + availableBookies.add(BookieId.parse(bookieId1)); + bookieCheckTask.startAudit(true); + verify(numUnderReplicatedLedgerStats, times(1)) + .registerSuccessfulValue(eq(bookie2LedgersCount)); + + } + + private Set getLedgers(long count) { + final Set ledgers = Sets.newHashSet(); + for (int i = 0; i < count; i++) { + ledgers.add(i + startLedgerId++); + } + return ledgers; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieTest.java new file mode 100644 index 0000000000000..14cdf3e1fc29c --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorBookieTest.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNotSame; +import static org.testng.AssertJUnit.assertSame; +import static org.testng.AssertJUnit.assertTrue; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerAuditorManager; +import org.apache.zookeeper.ZooKeeper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies the auditor bookie scenarios which will be monitoring the + * bookie failures. 
+ */ +public class AuditorBookieTest extends BookKeeperClusterTestCase { + // Depending on the taste, select the amount of logging + // by decommenting one of the two lines below + // private static final Logger LOG = Logger.getRootLogger(); + private static final Logger LOG = LoggerFactory + .getLogger(AuditorBookieTest.class); + private String electionPath; + private HashMap auditorElectors = new HashMap(); + private List zkClients = new LinkedList(); + + public AuditorBookieTest() throws Exception { + super(6); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + electionPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) + + "/underreplication/" + PulsarLedgerAuditorManager.ELECTION_PATH; + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + startAuditorElectors(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + stopAuditorElectors(); + for (ZooKeeper zk : zkClients) { + zk.close(); + } + zkClients.clear(); + super.tearDown(); + } + + /** + * Test should ensure only one should act as Auditor. Starting/shutdown + * other than auditor bookie shouldn't initiate re-election and multiple + * auditors. 
+ */ + @Test + public void testEnsureOnlySingleAuditor() throws Exception { + BookieServer auditor = verifyAuditor(); + + // shutdown bookie which is not an auditor + int indexOf = indexOfServer(auditor); + int bkIndexDownBookie; + if (indexOf < lastBookieIndex()) { + bkIndexDownBookie = indexOf + 1; + } else { + bkIndexDownBookie = indexOf - 1; + } + shutdownBookie(serverByIndex(bkIndexDownBookie)); + + startNewBookie(); + startNewBookie(); + // grace period for the auditor re-election if any + BookieServer newAuditor = waitForNewAuditor(auditor); + assertSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + } + + /** + * Test Auditor crashes should trigger re-election and another bookie should + * take over the auditor ship. + */ + @Test + public void testSuccessiveAuditorCrashes() throws Exception { + BookieServer auditor = verifyAuditor(); + shutdownBookie(auditor); + + BookieServer newAuditor1 = waitForNewAuditor(auditor); + shutdownBookie(newAuditor1); + BookieServer newAuditor2 = waitForNewAuditor(newAuditor1); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor2); + } + + /** + * Test restarting the entire bookie cluster. It shouldn't create multiple + * bookie auditors. 
+ */ + @Test + public void testBookieClusterRestart() throws Exception { + BookieServer auditor = verifyAuditor(); + for (AuditorElector auditorElector : auditorElectors.values()) { + assertTrue("Auditor elector is not running!", auditorElector + .isRunning()); + } + stopBKCluster(); + stopAuditorElectors(); + + startBKCluster(zkUtil.getMetadataServiceUri()); + //startBKCluster(zkUtil.getMetadataServiceUri()) override the base conf metadataServiceUri + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + startAuditorElectors(); + BookieServer newAuditor = waitForNewAuditor(auditor); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + } + + /** + * Test the vote is deleting from the ZooKeeper during shutdown. + */ + @Test + public void testShutdown() throws Exception { + BookieServer auditor = verifyAuditor(); + shutdownBookie(auditor); + + // waiting for new auditor + BookieServer newAuditor = waitForNewAuditor(auditor); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + + List children = zkc.getChildren(electionPath, false); + for (String child : children) { + byte[] data = zkc.getData(electionPath + '/' + child, false, null); + String bookieIP = new String(data); + String addr = auditor.getBookieId().toString(); + assertFalse("AuditorElection cleanup fails", bookieIP + .contains(addr)); + } + } + + /** + * Test restart of the previous Auditor bookie shouldn't initiate + * re-election and should create new vote after restarting. + */ + @Test + public void testRestartAuditorBookieAfterCrashing() throws Exception { + BookieServer auditor = verifyAuditor(); + + String addr = auditor.getBookieId().toString(); + + // restarting Bookie with same configurations. 
+ ServerConfiguration serverConfiguration = shutdownBookie(auditor); + + auditorElectors.remove(addr); + startBookie(serverConfiguration); + // starting corresponding auditor elector + + if (LOG.isDebugEnabled()) { + LOG.debug("Performing Auditor Election:" + addr); + } + startAuditorElector(addr); + + // waiting for new auditor to come + BookieServer newAuditor = waitForNewAuditor(auditor); + assertNotSame( + "Auditor re-election is not happened for auditor failure!", + auditor, newAuditor); + assertFalse("No relection after old auditor rejoins", auditor + .getBookieId().equals(newAuditor.getBookieId())); + } + + private void startAuditorElector(String addr) throws Exception { + AuditorElector auditorElector = new AuditorElector(addr, + baseConf); + auditorElectors.put(addr, auditorElector); + auditorElector.start(); + if (LOG.isDebugEnabled()) { + LOG.debug("Starting Auditor Elector"); + } + } + + private void startAuditorElectors() throws Exception { + for (BookieId addr : bookieAddresses()) { + startAuditorElector(addr.toString()); + } + } + + private void stopAuditorElectors() throws Exception { + for (AuditorElector auditorElector : auditorElectors.values()) { + auditorElector.shutdown(); + if (LOG.isDebugEnabled()) { + LOG.debug("Stopping Auditor Elector!"); + } + } + } + + private BookieServer verifyAuditor() throws Exception { + List auditors = getAuditorBookie(); + assertEquals("Multiple Bookies acting as Auditor!", 1, auditors + .size()); + if (LOG.isDebugEnabled()) { + LOG.debug("Bookie running as Auditor:" + auditors.get(0)); + } + return auditors.get(0); + } + + private List getAuditorBookie() throws Exception { + List auditors = new LinkedList(); + byte[] data = zkc.getData(electionPath, false, null); + assertNotNull("Auditor election failed", data); + for (int i = 0; i < bookieCount(); i++) { + BookieServer bks = serverByIndex(i); + if (new String(data).contains(bks.getBookieId() + "")) { + auditors.add(bks); + } + } + return auditors; + } + + 
private ServerConfiguration shutdownBookie(BookieServer bkServer) throws Exception { + int index = indexOfServer(bkServer); + String addr = addressByIndex(index).toString(); + if (LOG.isDebugEnabled()) { + LOG.debug("Shutting down bookie:" + addr); + } + + // shutdown bookie which is an auditor + ServerConfiguration conf = killBookie(index); + + // stopping corresponding auditor elector + auditorElectors.get(addr).shutdown(); + return conf; + } + + private BookieServer waitForNewAuditor(BookieServer auditor) + throws Exception { + BookieServer newAuditor = null; + int retryCount = 8; + while (retryCount > 0) { + try { + List auditors = getAuditorBookie(); + if (auditors.size() > 0) { + newAuditor = auditors.get(0); + if (auditor != newAuditor) { + break; + } + } + } catch (Exception ignore) { + } + + Thread.sleep(500); + retryCount--; + } + assertNotNull( + "New Auditor is not reelected after auditor crashes", + newAuditor); + verifyAuditor(); + return newAuditor; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorCheckAllLedgersTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorCheckAllLedgersTaskTest.java new file mode 100644 index 0000000000000..6b58c72af0766 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorCheckAllLedgersTaskTest.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LayoutManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.bookkeeper.PulsarLayoutManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorCheckAllLedgersTask}. 
+ */ +public class AuditorCheckAllLedgersTaskTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorCheckAllLedgersTaskTest.class); + + private static final int maxNumberOfConcurrentOpenLedgerOperations = 500; + private static final int acquireConcurrentOpenLedgerOperationsTimeoutMSec = 120000; + + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager ledgerUnderreplicationManager; + + public AuditorCheckAllLedgersTaskTest() { + super(3); + baseConf.setPageLimit(1); + baseConf.setAutoRecoveryDaemonEnabled(false); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + final BookKeeper bookKeeper = new BookKeeper(baseClientConf); + admin = new BookKeeperAdmin(bookKeeper, NullStatsLogger.INSTANCE, new ClientConfiguration(baseClientConf)); + + String ledgersRoot = "/ledgers"; + String storeUri = metadataServiceUri.replaceAll("zk://", "").replaceAll("/ledgers", ""); + MetadataStoreExtended store = MetadataStoreExtended.create(storeUri, + MetadataStoreConfig.builder().fsyncEnable(false).build()); + LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRoot); + PulsarLedgerManagerFactory ledgerManagerFactory = new PulsarLedgerManagerFactory(); + + ClientConfiguration conf = new ClientConfiguration(); + conf.setZkLedgersRootPath(ledgersRoot); + ledgerManagerFactory.initialize(conf, layoutManager, 1); + ledgerUnderreplicationManager = ledgerManagerFactory.newLedgerUnderreplicationManager(); + ledgerManager = ledgerManagerFactory.newLedgerManager(); + + baseConf.setAuditorMaxNumberOfConcurrentOpenLedgerOperations(maxNumberOfConcurrentOpenLedgerOperations); + baseConf.setAuditorAcquireConcurrentOpenLedgerOperationsTimeoutMSec( + acquireConcurrentOpenLedgerOperationsTimeoutMSec); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (ledgerManager != null) { + ledgerManager.close(); + } 
+ if (ledgerUnderreplicationManager != null) { + ledgerUnderreplicationManager.close(); + } + if (admin != null) { + admin.close(); + } + super.tearDown(); + } + + @Test + public void testCheckAllLedgers() throws Exception { + // 1. create ledgers + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + // 2. init CheckAllLedgersTask + final TestStatsProvider statsProvider = new TestStatsProvider(); + final TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + + AuditorCheckAllLedgersTask auditorCheckAllLedgersTask = new AuditorCheckAllLedgersTask( + baseConf, auditorStats, admin, ledgerManager, + ledgerUnderreplicationManager, null, (flag, throwable) -> flag.set(false)); + + // 3. checkAllLedgers + auditorCheckAllLedgersTask.runTask(); + + // 4. verify + assertEquals("CHECK_ALL_LEDGERS_TIME", 1, ((TestStatsProvider.TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.CHECK_ALL_LEDGERS_TIME)).getSuccessCount()); + assertEquals("NUM_LEDGERS_CHECKED", numLedgers, + (long) statsLogger.getCounter(ReplicationStats.NUM_LEDGERS_CHECKED).get()); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java new file mode 100644 index 0000000000000..970c45964f784 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorLedgerCheckerTest.java @@ -0,0 +1,1138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNotSame; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import lombok.Cleanup; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.AsyncCallback.AddCallback; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import 
org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.common.util.OrderedScheduler; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LayoutManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataClientDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.UnderreplicatedLedger; +import org.apache.bookkeeper.meta.ZkLedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.bookkeeper.PulsarLayoutManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerAuditorManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Tests publishing of under replicated ledgers by the Auditor bookie node when + * corresponding bookies identifes as not running. 
+ */ +public class AuditorLedgerCheckerTest extends BookKeeperClusterTestCase { + + // Depending on the taste, select the amount of logging + // by decommenting one of the two lines below + // private static final Logger LOG = Logger.getRootLogger(); + private static final Logger LOG = LoggerFactory + .getLogger(AuditorLedgerCheckerTest.class); + + private static final byte[] ledgerPassword = "aaa".getBytes(); + private Random rng; // Random Number Generator + + private DigestType digestType; + + private String underreplicatedPath; + private Map auditorElectors = new ConcurrentHashMap<>(); + private LedgerUnderreplicationManager urLedgerMgr; + + private Set urLedgerList; + private String electionPath; + + private List ledgerList; + + public AuditorLedgerCheckerTest() + throws Exception { + this("org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + } + + AuditorLedgerCheckerTest(String ledgerManagerFactoryClass) + throws Exception { + super(3); + LOG.info("Running test case using ledger manager : " + + ledgerManagerFactoryClass); + this.digestType = DigestType.CRC32; + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); // set ledger manager name + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactoryClass); + baseClientConf + .setLedgerManagerFactoryClassName(ledgerManagerFactoryClass); + } + + @BeforeMethod + public void setUp() throws Exception { + super.setUp(); + underreplicatedPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseClientConf) + + "/underreplication/ledgers"; + electionPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) + + "/underreplication/" + PulsarLedgerAuditorManager.ELECTION_PATH; + + String ledgersRoot = "/ledgers"; + String storeUri = metadataServiceUri.replaceAll("zk://", "").replaceAll("/ledgers", ""); + MetadataStoreExtended store = MetadataStoreExtended.create(storeUri, + 
MetadataStoreConfig.builder().fsyncEnable(false).build()); + LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRoot); + PulsarLedgerManagerFactory ledgerManagerFactory = new PulsarLedgerManagerFactory(); + ClientConfiguration conf = new ClientConfiguration(); + conf.setZkLedgersRootPath(ledgersRoot); + ledgerManagerFactory.initialize(conf, layoutManager, 1); + urLedgerMgr = ledgerManagerFactory.newLedgerUnderreplicationManager(); + urLedgerMgr.setCheckAllLedgersCTime(System.currentTimeMillis()); + + baseClientConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + startAuditorElectors(); + rng = new Random(System.currentTimeMillis()); // Initialize the Random + urLedgerList = new HashSet(); + ledgerList = new ArrayList(2); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + stopAuditorElectors(); + super.tearDown(); + } + + private void startAuditorElectors() throws Exception { + for (String addr : bookieAddresses().stream().map(Object::toString) + .collect(Collectors.toList())) { + AuditorElector auditorElector = new AuditorElector(addr, baseConf); + auditorElectors.put(addr, auditorElector); + auditorElector.start(); + if (LOG.isDebugEnabled()) { + LOG.debug("Starting Auditor Elector"); + } + } + } + + private void stopAuditorElectors() throws Exception { + for (AuditorElector auditorElector : auditorElectors.values()) { + auditorElector.shutdown(); + if (LOG.isDebugEnabled()) { + LOG.debug("Stopping Auditor Elector!"); + } + } + } + + /** + * Test publishing of under replicated ledgers by the auditor bookie. 
+ */ + @Test + public void testSimpleLedger() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + int bkShutdownIndex = lastBookieIndex(); + String shutdownBookie = shutdownBookie(bkShutdownIndex); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + waitForAuditToComplete(); + underReplicaLatch.await(5, TimeUnit.SECONDS); + Map urLedgerData = getUrLedgerData(urLedgerList); + assertEquals("Missed identifying under replicated ledgers", 1, + urLedgerList.size()); + + /* + * Sample data format present in the under replicated ledger path + * + * {4=replica: "10.18.89.153:5002"} + */ + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + String data = urLedgerData.get(ledgerId); + assertTrue("Bookie " + shutdownBookie + + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookie)); + } + + /** + * Test once published under replicated ledger should exists even after + * restarting respective bookie. 
+ */ + @Test + public void testRestartBookie() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + LedgerHandle lh2 = createAndAddEntriesToLedger(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : {}, {}", lh1, lh2); + } + + int bkShutdownIndex = lastBookieIndex(); + ServerConfiguration bookieConf1 = confByIndex(bkShutdownIndex); + String shutdownBookie = shutdownBookie(bkShutdownIndex); + + // restart the failed bookie + startAndAddBookie(bookieConf1); + + waitForLedgerMissingReplicas(lh1.getId(), 10, shutdownBookie); + waitForLedgerMissingReplicas(lh2.getId(), 10, shutdownBookie); + } + + /** + * Test publishing of under replicated ledgers when multiple bookie failures + * one after another. + */ + @Test + public void testMultipleBookieFailures() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + + // failing first bookie + shutdownBookie(lastBookieIndex()); + + // simulate re-replication + doLedgerRereplication(lh1.getId()); + + // failing another bookie + String shutdownBookie = shutdownBookie(lastBookieIndex()); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertTrue("Ledger should be missing second replica", + waitForLedgerMissingReplicas(lh1.getId(), 10, shutdownBookie)); + } + + @Test + public void testToggleLedgerReplication() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + ledgerList.add(lh1.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : " + ledgerList); + } + + // failing another bookie + CountDownLatch urReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // disabling ledger replication + urLedgerMgr.disableLedgerReplication(); + ArrayList shutdownBookieList = new ArrayList(); + shutdownBookieList.add(shutdownBookie(lastBookieIndex())); + shutdownBookieList.add(shutdownBookie(lastBookieIndex())); + + 
assertFalse("Ledger replication is not disabled!", urReplicaLatch + .await(1, TimeUnit.SECONDS)); + + // enabling ledger replication + urLedgerMgr.enableLedgerReplication(); + assertTrue("Ledger replication is not enabled!", urReplicaLatch.await( + 5, TimeUnit.SECONDS)); + } + + @Test + public void testDuplicateEnDisableAutoRecovery() throws Exception { + urLedgerMgr.disableLedgerReplication(); + try { + urLedgerMgr.disableLedgerReplication(); + fail("Must throw exception, since AutoRecovery is already disabled"); + } catch (UnavailableException e) { + assertTrue("AutoRecovery is not disabled previously!", + e.getCause().getCause() instanceof MetadataStoreException.BadVersionException); + } + urLedgerMgr.enableLedgerReplication(); + try { + urLedgerMgr.enableLedgerReplication(); + fail("Must throw exception, since AutoRecovery is already enabled"); + } catch (UnavailableException e) { + assertTrue("AutoRecovery is not enabled previously!", + e.getCause().getCause() instanceof MetadataStoreException.NotFoundException); + } + } + + /** + * Test Auditor should consider Readonly bookie as available bookie. Should not publish ur ledgers for + * readonly bookies. 
+ */ + @Test + public void testReadOnlyBookieExclusionFromURLedgersCheck() throws Exception { + LedgerHandle lh = createAndAddEntriesToLedger(); + ledgerList.add(lh.getId()); + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : " + ledgerList); + } + + int count = ledgerList.size(); + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(count); + + final int bkIndex = 2; + ServerConfiguration bookieConf = confByIndex(bkIndex); + BookieServer bk = serverByIndex(bkIndex); + bookieConf.setReadOnlyModeEnabled(true); + + ((BookieImpl) bk.getBookie()).getStateManager().doTransitionToReadOnlyMode(); + bkc.waitForReadOnlyBookie(BookieImpl.getBookieId(confByIndex(bkIndex))) + .get(30, TimeUnit.SECONDS); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for Auditor to finish ledger check."); + } + waitForAuditToComplete(); + assertFalse("latch should not have completed", underReplicaLatch.await(5, TimeUnit.SECONDS)); + } + + /** + * Test Auditor should consider Readonly bookie fail and publish ur ledgers for readonly bookies. 
+ */ + @Test + public void testReadOnlyBookieShutdown() throws Exception { + LedgerHandle lh = createAndAddEntriesToLedger(); + long ledgerId = lh.getId(); + ledgerList.add(ledgerId); + if (LOG.isDebugEnabled()) { + LOG.debug("Created following ledgers : " + ledgerList); + } + + int count = ledgerList.size(); + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(count); + + int bkIndex = lastBookieIndex(); + if (LOG.isDebugEnabled()) { + LOG.debug("Moving bookie {} {} to read only...", bkIndex, serverByIndex(bkIndex)); + } + ServerConfiguration bookieConf = confByIndex(bkIndex); + BookieServer bk = serverByIndex(bkIndex); + bookieConf.setReadOnlyModeEnabled(true); + + ((BookieImpl) bk.getBookie()).getStateManager().doTransitionToReadOnlyMode(); + bkc.waitForReadOnlyBookie(BookieImpl.getBookieId(confByIndex(bkIndex))) + .get(30, TimeUnit.SECONDS); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for Auditor to finish ledger check."); + } + waitForAuditToComplete(); + assertFalse("latch should not have completed", underReplicaLatch.await(1, TimeUnit.SECONDS)); + + String shutdownBookie = shutdownBookie(bkIndex); + + // grace period for publishing the bk-ledger + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + waitForAuditToComplete(); + underReplicaLatch.await(5, TimeUnit.SECONDS); + Map urLedgerData = getUrLedgerData(urLedgerList); + assertEquals("Missed identifying under replicated ledgers", 1, urLedgerList.size()); + + /* + * Sample data format present in the under replicated ledger path + * + * {4=replica: "10.18.89.153:5002"} + */ + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, urLedgerList.contains(ledgerId)); + String data = urLedgerData.get(ledgerId); + assertTrue("Bookie " + shutdownBookie + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookie)); + } + + public void 
testInnerDelayedAuditOfLostBookies() throws Exception { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(5); + + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(4, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // wait for another 5 seconds for the ledger to get reported as under replicated + assertTrue("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookieRef.get())); + } + + /** + * Test publishing of under replicated ledgers by the auditor + * bookie is delayed if LostBookieRecoveryDelay option is set. 
+ */ + @Test + public void testDelayedAuditOfLostBookies() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + testInnerDelayedAuditOfLostBookies(); + } + + /** + * Test publishing of under replicated ledgers by the auditor + * bookie is delayed if LostBookieRecoveryDelay option is set + * and it continues to be delayed even when periodic bookie check + * is set to run every 2 secs. I.e. periodic bookie check doesn't + * override the delay + */ + @Test + public void testDelayedAuditWithPeriodicBookieCheck() throws Exception { + // enable periodic bookie check on a cadence of every 2 seconds. + // this requires us to stop the auditor/auditorElectors, set the + // periodic check interval and restart the auditorElectors + stopAuditorElectors(); + baseConf.setAuditorPeriodicBookieCheckInterval(2); + startAuditorElectors(); + + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + // the delaying of audit should just work despite the fact + // we have enabled periodic bookie check + testInnerDelayedAuditOfLostBookies(); + } + + @Test + public void testRescheduleOfDelayedAuditOfLostBookiesToStartImmediately() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // wait for 50 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(50); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { + try { + String 
shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(4, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // set lostBookieRecoveryDelay to 0, so that it triggers AuditTask immediately + urLedgerMgr.setLostBookieRecoveryDelay(0); + + // wait for 1 second for the ledger to get reported as under replicated + assertTrue("audit of lost bookie isn't delayed", underReplicaLatch.await(1, TimeUnit.SECONDS)); + + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookieRef.get())); + } + + @Test + public void testRescheduleOfDelayedAuditOfLostBookiesToStartLater() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + // wait for 3 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(3); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef = new AtomicReference<>(); + CountDownLatch shutdownLatch = new CountDownLatch(1); + new Thread(() -> { 
+ try { + String shutdownBookie = shutDownNonAuditorBookie(); + shutdownBookieRef.set(shutdownBookie); + shutdownLatch.countDown(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // set lostBookieRecoveryDelay to 4, so the pending AuditTask is resheduled + urLedgerMgr.setLostBookieRecoveryDelay(4); + + // since we changed the BookieRecoveryDelay period to 4, the audittask shouldn't have been executed + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // wait for 3 seconds (since we already waited for 2 secs) for the ledger to get reported as under replicated + assertTrue("audit of lost bookie isn't delayed", underReplicaLatch.await(3, TimeUnit.SECONDS)); + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch.await(); + assertTrue("Bookie " + shutdownBookieRef.get() + + "is not listed in the ledger as missing replica :" + data, + data.contains(shutdownBookieRef.get())); + } + + @Test + public void testTriggerAuditorWithNoPendingAuditTask() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + int lostBookieRecoveryDelayConfValue = baseConf.getLostBookieRecoveryDelay(); + Auditor auditorBookiesAuditor = getAuditorBookiesAuditor(); + Future auditTask = auditorBookiesAuditor.getAuditTask(); 
+ int lostBookieRecoveryDelayBeforeChange = auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange(); + assertEquals("auditTask is supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to BaseConf's lostBookieRecoveryDelay", + lostBookieRecoveryDelayConfValue, lostBookieRecoveryDelayBeforeChange); + + @Cleanup("shutdown") OrderedScheduler scheduler = OrderedScheduler.newSchedulerBuilder() + .name("test-scheduler") + .numThreads(1) + .build(); + @Cleanup MetadataClientDriver driver = + MetadataDrivers.getClientDriver(URI.create(baseClientConf.getMetadataServiceUri())); + driver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.of(zkc)); + + // there is no easy way to validate if the Auditor has executed Audit process (Auditor.startAudit), + // without shuttingdown Bookie. To test if by resetting LostBookieRecoveryDelay it does Auditing + // even when there is no pending AuditTask, following approach is needed. + + // Here we are creating few ledgers ledgermetadata with non-existing bookies as its ensemble. + // When Auditor does audit it recognizes these ledgers as underreplicated and mark them as + // under-replicated, since these bookies are not available. 
+ int numofledgers = 5; + Random rand = new Random(); + for (int i = 0; i < numofledgers; i++) { + ArrayList ensemble = new ArrayList(); + ensemble.add(new BookieSocketAddress("99.99.99.99:9999").toBookieId()); + ensemble.add(new BookieSocketAddress("11.11.11.11:1111").toBookieId()); + ensemble.add(new BookieSocketAddress("88.88.88.88:8888").toBookieId()); + + long ledgerId = (Math.abs(rand.nextLong())) % 100000000; + + LedgerMetadata metadata = LedgerMetadataBuilder.create() + .withId(ledgerId) + .withEnsembleSize(3).withWriteQuorumSize(2).withAckQuorumSize(2) + .withPassword("passwd".getBytes()) + .withDigestType(DigestType.CRC32.toApiDigestType()) + .newEnsembleEntry(0L, ensemble).build(); + + try (LedgerManager lm = driver.getLedgerManagerFactory().newLedgerManager()) { + lm.createLedgerMetadata(ledgerId, metadata).get(2000, TimeUnit.MILLISECONDS); + } + ledgerList.add(ledgerId); + } + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList.size()); + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelayBeforeChange); + assertTrue("Audit should be triggered and created ledgers should be marked as underreplicated", + underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("All the ledgers should be marked as underreplicated", ledgerList.size(), urLedgerList.size()); + + auditTask = auditorBookiesAuditor.getAuditTask(); + assertEquals("auditTask is supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to BaseConf's lostBookieRecoveryDelay", + lostBookieRecoveryDelayBeforeChange, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + } + + @Test + public void testTriggerAuditorWithPendingAuditTask() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + Auditor auditorBookiesAuditor = getAuditorBookiesAuditor(); + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); 
+ if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + int lostBookieRecoveryDelay = 5; + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + new Thread(() -> { + try { + shutDownNonAuditorBookie(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + Future auditTask = auditorBookiesAuditor.getAuditTask(); + assertNotSame("auditTask is not supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to what we set", + lostBookieRecoveryDelay, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + + // set lostBookieRecoveryDelay to 5 (previous value), so that Auditor is triggered immediately + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); + assertTrue("audit of lost bookie shouldn't be delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("all under replicated ledgers should be identified", ledgerList.size(), + urLedgerList.size()); + + Thread.sleep(100); + auditTask = auditorBookiesAuditor.getAuditTask(); + assertEquals("auditTask is supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to previously set value", + lostBookieRecoveryDelay, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + } + + @Test + public void 
testTriggerAuditorBySettingDelayToZeroWithPendingAuditTask() throws Exception { + // wait for a second so that the initial periodic check finishes + Thread.sleep(1000); + + Auditor auditorBookiesAuditor = getAuditorBookiesAuditor(); + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + final CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList + .size()); + + int lostBookieRecoveryDelay = 5; + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(lostBookieRecoveryDelay); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + new Thread(() -> { + try { + shutDownNonAuditorBookie(); + } catch (Exception ignore) { + } + }).start(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ledgers to be marked as under replicated"); + } + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + Future auditTask = auditorBookiesAuditor.getAuditTask(); + assertNotSame("auditTask is not supposed to be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to what we set", + lostBookieRecoveryDelay, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + + // set lostBookieRecoveryDelay to 0, so that Auditor is triggered immediately + urLedgerMgr.setLostBookieRecoveryDelay(0); + assertTrue("audit of lost bookie shouldn't be delayed", underReplicaLatch.await(1, TimeUnit.SECONDS)); + assertEquals("all under replicated ledgers should be identified", ledgerList.size(), + urLedgerList.size()); + + Thread.sleep(100); + auditTask = auditorBookiesAuditor.getAuditTask(); + assertEquals("auditTask is supposed to 
be null", null, auditTask); + assertEquals( + "lostBookieRecoveryDelayBeforeChange of Auditor should be equal to previously set value", + 0, auditorBookiesAuditor.getLostBookieRecoveryDelayBeforeChange()); + } + + /** + * Test audit of bookies is delayed when one bookie is down. But when + * another one goes down, the audit is started immediately. + */ + @Test + public void testDelayedAuditWithMultipleBookieFailures() throws Exception { + // wait for the periodic bookie check to finish + Thread.sleep(1000); + + // create a ledger with a bunch of entries + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList.size()); + + // wait for 10 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(10); + + // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef1 = new AtomicReference<>(); + CountDownLatch shutdownLatch1 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie1 = shutDownNonAuditorBookie(); + shutdownBookieRef1.set(shutdownBookie1); + shutdownLatch1.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // wait for 3 seconds and there shouldn't be any under replicated ledgers + // because we have delayed the start of audit by 10 seconds + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(3, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // Now shutdown the second non auditor bookie; We want to make sure that + // the history about having delayed recovery remains. Hence we make sure + // we bring down a non auditor bookie. 
This should cause the audit to take + // place immediately and not wait for the remaining 7 seconds to elapse + AtomicReference shutdownBookieRef2 = new AtomicReference<>(); + CountDownLatch shutdownLatch2 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie2 = shutDownNonAuditorBookie(); + shutdownBookieRef2.set(shutdownBookie2); + shutdownLatch2.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // 2 second grace period for the ledgers to get reported as under replicated + Thread.sleep(2000); + + // If the following checks pass, it means that audit happened + // within 2 seconds of second bookie going down and it didn't + // wait for 7 more seconds. Hence the second bookie failure doesn't + // delay the audit + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch1.await(); + shutdownLatch2.await(); + assertTrue("Bookie " + shutdownBookieRef1.get() + shutdownBookieRef2.get() + + " are not listed in the ledger as missing replicas :" + data, + data.contains(shutdownBookieRef1.get()) && data.contains(shutdownBookieRef2.get())); + } + + /** + * Test audit of bookies is delayed during rolling upgrade scenario: + * a bookies goes down and comes up, the next bookie go down and up and so on. + * At any time only one bookie is down. 
+ */ + @Test + public void testDelayedAuditWithRollingUpgrade() throws Exception { + // wait for the periodic bookie check to finish + Thread.sleep(1000); + + // create a ledger with a bunch of entries + LedgerHandle lh1 = createAndAddEntriesToLedger(); + Long ledgerId = lh1.getId(); + if (LOG.isDebugEnabled()) { + LOG.debug("Created ledger : " + ledgerId); + } + ledgerList.add(ledgerId); + lh1.close(); + + CountDownLatch underReplicaLatch = registerUrLedgerWatcher(ledgerList.size()); + + // wait for 5 seconds before starting the recovery work when a bookie fails + urLedgerMgr.setLostBookieRecoveryDelay(5); + + // shutdown a non auditor bookie to avoid an election + int idx1 = getShutDownNonAuditorBookieIdx(""); + ServerConfiguration conf1 = confByIndex(idx1); + AtomicReference shutdownBookieRef1 = new AtomicReference<>(); + CountDownLatch shutdownLatch1 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie1 = shutdownBookie(idx1); + shutdownBookieRef1.set(shutdownBookie1); + shutdownLatch1.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // wait for 2 seconds and there shouldn't be any under replicated ledgers + // because we have delayed the start of audit by 5 seconds + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // restart the bookie we shut down above + startAndAddBookie(conf1); + + // Now to simulate the rolling upgrade, bring down a bookie different from + // the one we brought down/up above. 
+ // shutdown a non auditor bookie; choosing non-auditor to avoid another election + AtomicReference shutdownBookieRef2 = new AtomicReference<>(); + CountDownLatch shutdownLatch2 = new CountDownLatch(1); + new Thread(() -> { + try { + String shutdownBookie2 = shutDownNonAuditorBookie(); + shutdownBookieRef2.set(shutdownBookie2); + shutdownLatch2.countDown(); + } catch (Exception ignore) { + } + }).start(); + + // since the first bookie that was brought down/up has come up, there is only + // one bookie down at this time. Hence the lost bookie check shouldn't start + // immediately; it will start 5 seconds after the second bookie went down + assertFalse("audit of lost bookie isn't delayed", underReplicaLatch.await(2, TimeUnit.SECONDS)); + assertEquals("under replicated ledgers identified when it was not expected", 0, + urLedgerList.size()); + + // wait for a total of 6 seconds(2+4) for the ledgers to get reported as under replicated + Thread.sleep(4000); + + // If the following checks pass, it means that auditing happened + // after lostBookieRecoveryDelay during rolling upgrade as expected + assertTrue("Ledger is not marked as underreplicated:" + ledgerId, + urLedgerList.contains(ledgerId)); + Map urLedgerData = getUrLedgerData(urLedgerList); + String data = urLedgerData.get(ledgerId); + shutdownLatch1.await(); + shutdownLatch2.await(); + assertTrue("Bookie " + shutdownBookieRef1.get() + "wrongly listed as missing the ledger: " + data, + !data.contains(shutdownBookieRef1.get())); + assertTrue("Bookie " + shutdownBookieRef2.get() + + " is not listed in the ledger as missing replicas :" + data, + data.contains(shutdownBookieRef2.get())); + LOG.info("*****************Test Complete"); + } + + private void waitForAuditToComplete() throws Exception { + long endTime = System.currentTimeMillis() + 5_000; + while (System.currentTimeMillis() < endTime) { + Auditor auditor = getAuditorBookiesAuditor(); + if (auditor != null) { + Future task = auditor.submitAuditTask(); + 
task.get(5, TimeUnit.SECONDS); + return; + } + Thread.sleep(100); + } + throw new TimeoutException("Could not find an audit within 5 seconds"); + } + + /** + * Wait for ledger to be underreplicated, and to be missing all replicas specified. + */ + private boolean waitForLedgerMissingReplicas(Long ledgerId, long secondsToWait, String... replicas) + throws Exception { + for (int i = 0; i < secondsToWait; i++) { + try { + UnderreplicatedLedger data = urLedgerMgr.getLedgerUnreplicationInfo(ledgerId); + boolean all = true; + for (String r : replicas) { + all = all && data.getReplicaList().contains(r); + } + if (all) { + return true; + } + } catch (Exception e) { + // may not find node + } + Thread.sleep(1000); + } + return false; + } + + private CountDownLatch registerUrLedgerWatcher(int count) + throws KeeperException, InterruptedException { + final CountDownLatch underReplicaLatch = new CountDownLatch(count); + for (Long ledgerId : ledgerList) { + Watcher urLedgerWatcher = new ChildWatcher(underReplicaLatch); + String znode = ZkLedgerUnderreplicationManager.getUrLedgerZnode(underreplicatedPath, + ledgerId); + zkc.exists(znode, urLedgerWatcher); + } + return underReplicaLatch; + } + + private void doLedgerRereplication(Long... 
ledgerIds) + throws UnavailableException { + for (int i = 0; i < ledgerIds.length; i++) { + long lid = urLedgerMgr.getLedgerToRereplicate(); + assertTrue("Received unexpected ledgerid", Arrays.asList(ledgerIds).contains(lid)); + urLedgerMgr.markLedgerReplicated(lid); + urLedgerMgr.releaseUnderreplicatedLedger(lid); + } + } + + private String shutdownBookie(int bkShutdownIndex) throws Exception { + BookieServer bkServer = serverByIndex(bkShutdownIndex); + String bookieAddr = bkServer.getBookieId().toString(); + if (LOG.isInfoEnabled()) { + LOG.info("Shutting down bookie:" + bookieAddr); + } + killBookie(bkShutdownIndex); + auditorElectors.get(bookieAddr).shutdown(); + auditorElectors.remove(bookieAddr); + return bookieAddr; + } + + private LedgerHandle createAndAddEntriesToLedger() throws BKException, + InterruptedException { + int numEntriesToWrite = 100; + // Create a ledger + LedgerHandle lh = bkc.createLedger(digestType, ledgerPassword); + LOG.info("Ledger ID: " + lh.getId()); + addEntry(numEntriesToWrite, lh); + return lh; + } + + private void addEntry(int numEntriesToWrite, LedgerHandle lh) + throws InterruptedException, BKException { + final CountDownLatch completeLatch = new CountDownLatch(numEntriesToWrite); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(Integer.MAX_VALUE)); + entry.position(0); + lh.asyncAddEntry(entry.array(), new AddCallback() { + public void addComplete(int rc2, LedgerHandle lh, long entryId, Object ctx) { + rc.compareAndSet(BKException.Code.OK, rc2); + completeLatch.countDown(); + } + }, null); + } + completeLatch.await(); + if (rc.get() != BKException.Code.OK) { + throw BKException.create(rc.get()); + } + + } + + private Map getUrLedgerData(Set urLedgerList) + throws KeeperException, InterruptedException { + Map urLedgerData = new HashMap(); + for (Long ledgerId : urLedgerList) { + String znode 
= ZkLedgerUnderreplicationManager.getUrLedgerZnode(underreplicatedPath, + ledgerId); + byte[] data = zkc.getData(znode, false, null); + urLedgerData.put(ledgerId, new String(data)); + } + return urLedgerData; + } + + private class ChildWatcher implements Watcher { + private final CountDownLatch underReplicaLatch; + + public ChildWatcher(CountDownLatch underReplicaLatch) { + this.underReplicaLatch = underReplicaLatch; + } + + @Override + public void process(WatchedEvent event) { + LOG.info("Received notification for the ledger path : " + + event.getPath()); + for (Long ledgerId : ledgerList) { + if (event.getPath().contains(ledgerId + "")) { + urLedgerList.add(ledgerId); + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("Count down and waiting for next notification"); + } + // count down and waiting for next notification + underReplicaLatch.countDown(); + } + } + + private BookieServer getAuditorBookie() throws Exception { + List auditors = new LinkedList(); + byte[] data = zkc.getData(electionPath, false, null); + assertNotNull("Auditor election failed", data); + for (int i = 0; i < bookieCount(); i++) { + BookieId bookieId = addressByIndex(i); + if (new String(data).contains(bookieId + "")) { + auditors.add(serverByIndex(i)); + } + } + assertEquals("Multiple Bookies acting as Auditor!", 1, auditors + .size()); + return auditors.get(0); + } + + private Auditor getAuditorBookiesAuditor() throws Exception { + BookieServer auditorBookieServer = getAuditorBookie(); + String bookieAddr = auditorBookieServer.getBookieId().toString(); + return auditorElectors.get(bookieAddr).auditor; + } + + private String shutDownNonAuditorBookie() throws Exception { + // shutdown bookie which is not an auditor + int indexOf = indexOfServer(getAuditorBookie()); + int bkIndexDownBookie; + if (indexOf < lastBookieIndex()) { + bkIndexDownBookie = indexOf + 1; + } else { + bkIndexDownBookie = indexOf - 1; + } + return shutdownBookie(bkIndexDownBookie); + } + + private int 
getShutDownNonAuditorBookieIdx(String exclude) throws Exception { + // shutdown bookie which is not an auditor + int indexOf = indexOfServer(getAuditorBookie()); + int bkIndexDownBookie = 0; + for (int i = 0; i <= lastBookieIndex(); i++) { + if (i == indexOf || addressByIndex(i).toString().equals(exclude)) { + continue; + } + bkIndexDownBookie = i; + break; + } + return bkIndexDownBookie; + } + + private String shutDownNonAuditorBookie(String exclude) throws Exception { + return shutdownBookie(getShutDownNonAuditorBookieIdx(exclude)); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicBookieCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicBookieCheckTest.java new file mode 100644 index 0000000000000..9e8c5a54a5d91 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicBookieCheckTest.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithLedgerManagerFactory; +import static org.testng.AssertJUnit.assertEquals; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.UncheckedExecutionException; +import lombok.Cleanup; +import org.apache.bookkeeper.client.ClientUtil; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.conf.TestBKConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies that the period check on the auditor + * will pick up on missing data in the client. 
+ */ +public class AuditorPeriodicBookieCheckTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorPeriodicBookieCheckTest.class); + + private AuditorElector auditorElector = null; + + private static final int CHECK_INTERVAL = 1; // run every second + + public AuditorPeriodicBookieCheckTest() throws Exception { + super(3); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + + ServerConfiguration conf = TestBKConfiguration.newServerConfiguration(); + conf.setAuditorPeriodicBookieCheckInterval(CHECK_INTERVAL); + + conf.setMetadataServiceUri( + metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + conf.setProperty("clientConnectTimeoutMillis", 500); + String addr = addressByIndex(0).toString(); + + auditorElector = new AuditorElector(addr, conf); + auditorElector.start(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + auditorElector.shutdown(); + super.tearDown(); + } + + /** + * Test that the periodic bookie checker works. 
+ */ + @Test + public void testPeriodicBookieCheckInterval() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + runFunctionWithLedgerManagerFactory(confByIndex(0), mFactory -> { + try (LedgerManager ledgerManager = mFactory.newLedgerManager()) { + @Cleanup final LedgerUnderreplicationManager underReplicationManager = + mFactory.newLedgerUnderreplicationManager(); + long ledgerId = 12345L; + ClientUtil.setupLedger(bkc.getLedgerManager(), ledgerId, + LedgerMetadataBuilder.create().withEnsembleSize(3) + .withWriteQuorumSize(3).withAckQuorumSize(3) + .newEnsembleEntry(0L, Lists.newArrayList( + new BookieSocketAddress("192.0.2.1", 1000).toBookieId(), + getBookie(0), + getBookie(1)))); + long underReplicatedLedger = -1; + for (int i = 0; i < 10; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", ledgerId, underReplicatedLedger); + } catch (Exception e) { + throw new UncheckedExecutionException(e.getMessage(), e); + } + return null; + }); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicCheckTest.java new file mode 100644 index 0000000000000..9c5805dc536d6 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPeriodicCheckTest.java @@ -0,0 +1,1070 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotSame; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import io.netty.buffer.ByteBuf; +import java.io.File; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.bookkeeper.bookie.Bookie; +import org.apache.bookkeeper.bookie.BookieAccessor; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.bookie.IndexPersistenceMgr; +import org.apache.bookkeeper.bookie.TestBookieImpl; +import org.apache.bookkeeper.client.AsyncCallback.AddCallback; 
+import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.Counter; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.stats.StatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestOpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.awaitility.Awaitility; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies that the period check on the auditor + * will pick up on missing data in the client. 
+ */ +public class AuditorPeriodicCheckTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorPeriodicCheckTest.class); + + private MetadataBookieDriver driver; + private HashMap auditorElectors = new HashMap(); + + private static final int CHECK_INTERVAL = 1; // run every second + + public AuditorPeriodicCheckTest() throws Exception { + super(3); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + + for (int i = 0; i < numBookies; i++) { + ServerConfiguration conf = new ServerConfiguration(confByIndex(i)); + conf.setAuditorPeriodicCheckInterval(CHECK_INTERVAL); + conf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + + String addr = addressByIndex(i).toString(); + + AuditorElector auditorElector = new AuditorElector(addr, conf); + auditorElectors.put(addr, auditorElector); + auditorElector.start(); + if (LOG.isDebugEnabled()) { + LOG.debug("Starting Auditor Elector"); + } + } + + URI uri = URI.create(confByIndex(0).getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver = MetadataDrivers.getBookieDriver(uri); + ServerConfiguration serverConfiguration = new ServerConfiguration(confByIndex(0)); + serverConfiguration.setMetadataServiceUri( + serverConfiguration.getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver.initialize(serverConfiguration, NullStatsLogger.INSTANCE); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (null != driver) { + driver.close(); + } + + for (AuditorElector e : auditorElectors.values()) { + 
e.shutdown(); + } + super.tearDown(); + } + + /** + * test that the periodic checking will detect corruptions in + * the bookie entry log. + */ + @Test + public void testEntryLogCorruption() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + underReplicationManager.disableLedgerReplication(); + + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + long ledgerId = lh.getId(); + for (int i = 0; i < 100; i++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + BookieAccessor.forceFlush((BookieImpl) serverByIndex(0).getBookie()); + + + File ledgerDir = confByIndex(0).getLedgerDirs()[0]; + ledgerDir = BookieImpl.getCurrentDirectory(ledgerDir); + // corrupt of entryLogs + File[] entryLogs = ledgerDir.listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + return name.endsWith(".log"); + } + }); + ByteBuffer junk = ByteBuffer.allocate(1024 * 1024); + for (File f : entryLogs) { + FileOutputStream out = new FileOutputStream(f); + out.getChannel().write(junk); + out.close(); + } + restartBookies(); // restart to clear read buffers + + underReplicationManager.enableLedgerReplication(); + long underReplicatedLedger = -1; + for (int i = 0; i < 10; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", ledgerId, underReplicatedLedger); + underReplicationManager.close(); + } + + /** + * test that the period checker will detect corruptions in + * the bookie index files. 
+ */ + @Test + public void testIndexCorruption() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + long ledgerToCorrupt = lh.getId(); + for (int i = 0; i < 100; i++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + // push ledgerToCorrupt out of page cache (bookie is configured to only use 1 page) + lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + for (int i = 0; i < 100; i++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + BookieAccessor.forceFlush((BookieImpl) serverByIndex(0).getBookie()); + + File ledgerDir = confByIndex(0).getLedgerDirs()[0]; + ledgerDir = BookieImpl.getCurrentDirectory(ledgerDir); + + // corrupt of entryLogs + File index = new File(ledgerDir, IndexPersistenceMgr.getLedgerName(ledgerToCorrupt)); + LOG.info("file to corrupt{}", index); + ByteBuffer junk = ByteBuffer.allocate(1024 * 1024); + FileOutputStream out = new FileOutputStream(index); + out.getChannel().write(junk); + out.close(); + + long underReplicatedLedger = -1; + for (int i = 0; i < 15; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", ledgerToCorrupt, underReplicatedLedger); + underReplicationManager.close(); + } + + /** + * Test that the period checker will not run when auto replication has been disabled. 
+ */ + @Test + public void testPeriodicCheckWhenDisabled() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + final LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + final int numLedgers = 10; + final int numMsgs = 2; + final CountDownLatch completeLatch = new CountDownLatch(numMsgs * numLedgers); + final AtomicInteger rc = new AtomicInteger(BKException.Code.OK); + + List lhs = new ArrayList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + lhs.add(lh); + for (int j = 0; j < 2; j++) { + lh.asyncAddEntry("testdata".getBytes(), new AddCallback() { + public void addComplete(int rc2, LedgerHandle lh, long entryId, Object ctx) { + if (rc.compareAndSet(BKException.Code.OK, rc2)) { + LOG.info("Failed to add entry : {}", BKException.getMessage(rc2)); + } + completeLatch.countDown(); + } + }, null); + } + } + completeLatch.await(); + if (rc.get() != BKException.Code.OK) { + throw BKException.create(rc.get()); + } + + for (LedgerHandle lh : lhs) { + lh.close(); + } + + underReplicationManager.disableLedgerReplication(); + + final AtomicInteger numReads = new AtomicInteger(0); + ServerConfiguration conf = killBookie(0); + + Bookie deadBookie = new TestBookieImpl(conf) { + @Override + public ByteBuf readEntry(long ledgerId, long entryId) + throws IOException, NoLedgerException { + // we want to disable during checking + numReads.incrementAndGet(); + throw new IOException("Fake I/O exception"); + } + }; + startAndAddBookie(conf, deadBookie); + + Thread.sleep(CHECK_INTERVAL * 2000); + assertEquals("Nothing should have tried to read", 0, numReads.get()); + underReplicationManager.enableLedgerReplication(); + Thread.sleep(CHECK_INTERVAL * 2000); // give it time to run + + underReplicationManager.disableLedgerReplication(); + // give it time to stop, from this point nothing new should be marked + 
Thread.sleep(CHECK_INTERVAL * 2000); + + int numUnderreplicated = 0; + long underReplicatedLedger = -1; + do { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger == -1) { + break; + } + numUnderreplicated++; + + underReplicationManager.markLedgerReplicated(underReplicatedLedger); + } while (underReplicatedLedger != -1); + + Thread.sleep(CHECK_INTERVAL * 2000); // give a chance to run again (it shouldn't, it's disabled) + + // ensure that nothing is marked as underreplicated + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + assertEquals("There should be no underreplicated ledgers", -1, underReplicatedLedger); + + LOG.info("{} of {} ledgers underreplicated", numUnderreplicated, numUnderreplicated); + assertTrue("All should be underreplicated", + numUnderreplicated <= numLedgers && numUnderreplicated > 0); + } + + /** + * Test that the period check will succeed if a ledger is deleted midway. + */ + @Test + public void testPeriodicCheckWhenLedgerDeleted() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + try (final Auditor auditor = new Auditor( + BookieImpl.getBookieId(confByIndex(0)).toString(), + confByIndex(0), NullStatsLogger.INSTANCE)) { + final AtomicBoolean exceptionCaught = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + Thread t = new Thread() { + public void run() { + try { + latch.countDown(); + for (int i = 0; i < numLedgers; i++) { + ((AuditorCheckAllLedgersTask) auditor.auditorCheckAllLedgersTask).checkAllLedgers(); + } + } catch (Exception e) { + LOG.error("Caught exception while checking all 
ledgers", e); + exceptionCaught.set(true); + } + } + }; + t.start(); + latch.await(); + for (Long id : ids) { + bkc.deleteLedger(id); + } + t.join(); + assertFalse("Shouldn't have thrown exception", exceptionCaught.get()); + } + } + + @Test + public void testGetLedgerFromZookeeperThrottled() throws Exception { + final int numberLedgers = 30; + + // write ledgers into bookkeeper cluster + try { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + for (int i = 0; i < numberLedgers; ++i) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + for (int j = 0; j < 5; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + } catch (InterruptedException | BKException e) { + LOG.error("Failed to shutdown auditor elector or write data to ledgers ", e); + fail(); + } + + // create auditor and call `checkAllLedgers` + ServerConfiguration configuration = confByIndex(0); + configuration.setAuditorMaxNumberOfConcurrentOpenLedgerOperations(10); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numLedgersChecked = statsLogger + .getCounter(ReplicationStats.NUM_LEDGERS_CHECKED); + Auditor auditor = new Auditor(BookieImpl.getBookieId(configuration).toString(), + configuration, statsLogger); + + try { + ((AuditorCheckAllLedgersTask) auditor.auditorCheckAllLedgersTask).checkAllLedgers(); + assertEquals("NUM_LEDGERS_CHECKED", numberLedgers, (long) numLedgersChecked.get()); + } catch (Exception e) { + LOG.error("Caught exception while checking all ledgers ", e); + fail(); + } + } + + @Test + public void testInitialDelayOfCheckAllLedgers() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + 
ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + validateInitialDelayOfCheckAllLedgers(urm, -1, 1000, servConf, bkc); + validateInitialDelayOfCheckAllLedgers(urm, 999, 1000, servConf, bkc); + validateInitialDelayOfCheckAllLedgers(urm, 1001, 1000, servConf, bkc); + } + + void validateInitialDelayOfCheckAllLedgers(LedgerUnderreplicationManager urm, long timeSinceLastExecutedInSecs, + long auditorPeriodicCheckInterval, ServerConfiguration servConf, + BookKeeper bkc) + throws UnavailableException, UnknownHostException, InterruptedException { + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger checkAllLedgersStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.CHECK_ALL_LEDGERS_TIME); + servConf.setAuditorPeriodicCheckInterval(auditorPeriodicCheckInterval); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, + statsLogger, null); + CountDownLatch latch = auditor.getLatch(); + assertEquals("CHECK_ALL_LEDGERS_TIME SuccessCount", 0, checkAllLedgersStatsLogger.getSuccessCount()); + long curTimeBeforeStart = System.currentTimeMillis(); + long checkAllLedgersCTime = -1; + long initialDelayInMsecs = -1; + long nextExpectedCheckAllLedgersExecutionTime = -1; + long bufferTimeInMsecs = 12000L; + if (timeSinceLastExecutedInSecs == -1) { + /* + * if we are setting checkAllLedgersCTime to -1, it means that + * checkAllLedgers hasn't run before. 
So initialDelay for + * checkAllLedgers should be 0. + */ + checkAllLedgersCTime = -1; + initialDelayInMsecs = 0; + } else { + checkAllLedgersCTime = curTimeBeforeStart - timeSinceLastExecutedInSecs * 1000L; + initialDelayInMsecs = timeSinceLastExecutedInSecs > auditorPeriodicCheckInterval ? 0 + : (auditorPeriodicCheckInterval - timeSinceLastExecutedInSecs) * 1000L; + } + /* + * next checkAllLedgers should happen atleast after + * nextExpectedCheckAllLedgersExecutionTime. + */ + nextExpectedCheckAllLedgersExecutionTime = curTimeBeforeStart + initialDelayInMsecs; + + urm.setCheckAllLedgersCTime(checkAllLedgersCTime); + auditor.start(); + /* + * since auditorPeriodicCheckInterval are higher values (in the order of + * 100s of seconds), its ok bufferTimeInMsecs to be ` 10 secs. + */ + assertTrue("checkAllLedgers should have executed with initialDelay " + initialDelayInMsecs, + latch.await(initialDelayInMsecs + bufferTimeInMsecs, TimeUnit.MILLISECONDS)); + for (int i = 0; i < 10; i++) { + Thread.sleep(100); + if (checkAllLedgersStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("CHECK_ALL_LEDGERS_TIME SuccessCount", 1, checkAllLedgersStatsLogger.getSuccessCount()); + long currentCheckAllLedgersCTime = urm.getCheckAllLedgersCTime(); + assertTrue( + "currentCheckAllLedgersCTime: " + currentCheckAllLedgersCTime + + " should be greater than nextExpectedCheckAllLedgersExecutionTime: " + + nextExpectedCheckAllLedgersExecutionTime, + currentCheckAllLedgersCTime > nextExpectedCheckAllLedgersExecutionTime); + assertTrue( + "currentCheckAllLedgersCTime: " + currentCheckAllLedgersCTime + + " should be lesser than nextExpectedCheckAllLedgersExecutionTime+bufferTimeInMsecs: " + + (nextExpectedCheckAllLedgersExecutionTime + bufferTimeInMsecs), + currentCheckAllLedgersCTime < (nextExpectedCheckAllLedgersExecutionTime + bufferTimeInMsecs)); + auditor.close(); + } + + @Test + public void testInitialDelayOfPlacementPolicyCheck() throws Exception { + for 
(AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + validateInitialDelayOfPlacementPolicyCheck(urm, -1, 1000, servConf, bkc); + validateInitialDelayOfPlacementPolicyCheck(urm, 999, 1000, servConf, bkc); + validateInitialDelayOfPlacementPolicyCheck(urm, 1001, 1000, servConf, bkc); + } + + void validateInitialDelayOfPlacementPolicyCheck(LedgerUnderreplicationManager urm, long timeSinceLastExecutedInSecs, + long auditorPeriodicPlacementPolicyCheckInterval, + ServerConfiguration servConf, BookKeeper bkc) + throws UnavailableException, UnknownHostException, InterruptedException { + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger placementPolicyCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(auditorPeriodicPlacementPolicyCheckInterval); + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, + statsLogger, null); + CountDownLatch latch = auditor.getLatch(); + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 0, placementPolicyCheckStatsLogger.getSuccessCount()); + long curTimeBeforeStart = System.currentTimeMillis(); + long 
placementPolicyCheckCTime = -1; + long initialDelayInMsecs = -1; + long nextExpectedPlacementPolicyCheckExecutionTime = -1; + long bufferTimeInMsecs = 20000L; + if (timeSinceLastExecutedInSecs == -1) { + /* + * if we are setting placementPolicyCheckCTime to -1, it means that + * placementPolicyCheck hasn't run before. So initialDelay for + * placementPolicyCheck should be 0. + */ + placementPolicyCheckCTime = -1; + initialDelayInMsecs = 0; + } else { + placementPolicyCheckCTime = curTimeBeforeStart - timeSinceLastExecutedInSecs * 1000L; + initialDelayInMsecs = timeSinceLastExecutedInSecs > auditorPeriodicPlacementPolicyCheckInterval ? 0 + : (auditorPeriodicPlacementPolicyCheckInterval - timeSinceLastExecutedInSecs) * 1000L; + } + /* + * next placementPolicyCheck should happen atleast after + * nextExpectedPlacementPolicyCheckExecutionTime. + */ + nextExpectedPlacementPolicyCheckExecutionTime = curTimeBeforeStart + initialDelayInMsecs; + + urm.setPlacementPolicyCheckCTime(placementPolicyCheckCTime); + auditor.start(); + /* + * since auditorPeriodicPlacementPolicyCheckInterval are higher values (in the + * order of 100s of seconds), its ok bufferTimeInMsecs to be ` 20 secs. 
+ */ + assertTrue("placementPolicyCheck should have executed with initialDelay " + initialDelayInMsecs, + latch.await(initialDelayInMsecs + bufferTimeInMsecs, TimeUnit.MILLISECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (placementPolicyCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 1, placementPolicyCheckStatsLogger.getSuccessCount()); + long currentPlacementPolicyCheckCTime = urm.getPlacementPolicyCheckCTime(); + assertTrue( + "currentPlacementPolicyCheckCTime: " + currentPlacementPolicyCheckCTime + + " should be greater than nextExpectedPlacementPolicyCheckExecutionTime: " + + nextExpectedPlacementPolicyCheckExecutionTime, + currentPlacementPolicyCheckCTime > nextExpectedPlacementPolicyCheckExecutionTime); + assertTrue( + "currentPlacementPolicyCheckCTime: " + currentPlacementPolicyCheckCTime + + " should be lesser than nextExpectedPlacementPolicyCheckExecutionTime+bufferTimeInMsecs: " + + (nextExpectedPlacementPolicyCheckExecutionTime + bufferTimeInMsecs), + currentPlacementPolicyCheckCTime < (nextExpectedPlacementPolicyCheckExecutionTime + bufferTimeInMsecs)); + auditor.close(); + } + + @Test + public void testInitialDelayOfReplicasCheck() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + LedgerHandle lh = bkc.createLedger(3, 2, DigestType.CRC32, "passwd".getBytes()); + for (int j = 0; j < 5; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + + long ledgerId = 100000L; + lh = bkc.createLedgerAdv(ledgerId, 3, 2, 2, DigestType.CRC32, "passwd".getBytes(), null); + lh.close(); + + ledgerId = 100001234L; + lh = bkc.createLedgerAdv(ledgerId, 3, 3, 2, DigestType.CRC32, "passwd".getBytes(), null); + for (int j = 0; j < 4; j++) { + lh.addEntry(j, "testdata".getBytes()); + } + lh.close(); + + ledgerId = 991234L; + lh = bkc.createLedgerAdv(ledgerId, 3, 2, 2, DigestType.CRC32, "passwd".getBytes(), null); + 
lh.addEntry(0, "testdata".getBytes()); + lh.close(); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + validateInitialDelayOfReplicasCheck(urm, -1, 1000, servConf, bkc); + validateInitialDelayOfReplicasCheck(urm, 999, 1000, servConf, bkc); + validateInitialDelayOfReplicasCheck(urm, 1001, 1000, servConf, bkc); + } + + void validateInitialDelayOfReplicasCheck(LedgerUnderreplicationManager urm, long timeSinceLastExecutedInSecs, + long auditorPeriodicReplicasCheckInterval, ServerConfiguration servConf, + BookKeeper bkc) + throws UnavailableException, UnknownHostException, InterruptedException { + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger replicasCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME); + servConf.setAuditorPeriodicReplicasCheckInterval(auditorPeriodicReplicasCheckInterval); + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, + statsLogger, null); + CountDownLatch latch = auditor.getLatch(); + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 0, replicasCheckStatsLogger.getSuccessCount()); + long curTimeBeforeStart = System.currentTimeMillis(); + long replicasCheckCTime = -1; + long initialDelayInMsecs = -1; + long nextExpectedReplicasCheckExecutionTime = -1; + long bufferTimeInMsecs = 20000L; + if (timeSinceLastExecutedInSecs == -1) { + /* + * if we are setting replicasCheckCTime to -1, it means that + * replicasCheck hasn't run before. So initialDelay for + * replicasCheck should be 0. 
+ */ + replicasCheckCTime = -1; + initialDelayInMsecs = 0; + } else { + replicasCheckCTime = curTimeBeforeStart - timeSinceLastExecutedInSecs * 1000L; + initialDelayInMsecs = timeSinceLastExecutedInSecs > auditorPeriodicReplicasCheckInterval ? 0 + : (auditorPeriodicReplicasCheckInterval - timeSinceLastExecutedInSecs) * 1000L; + } + /* + * next replicasCheck should happen atleast after + * nextExpectedReplicasCheckExecutionTime. + */ + nextExpectedReplicasCheckExecutionTime = curTimeBeforeStart + initialDelayInMsecs; + + urm.setReplicasCheckCTime(replicasCheckCTime); + auditor.start(); + /* + * since auditorPeriodicReplicasCheckInterval are higher values (in the + * order of 100s of seconds), its ok bufferTimeInMsecs to be ` 20 secs. + */ + assertTrue("replicasCheck should have executed with initialDelay " + initialDelayInMsecs, + latch.await(initialDelayInMsecs + bufferTimeInMsecs, TimeUnit.MILLISECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (replicasCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 1, replicasCheckStatsLogger.getSuccessCount()); + long currentReplicasCheckCTime = urm.getReplicasCheckCTime(); + assertTrue( + "currentReplicasCheckCTime: " + currentReplicasCheckCTime + + " should be greater than nextExpectedReplicasCheckExecutionTime: " + + nextExpectedReplicasCheckExecutionTime, + currentReplicasCheckCTime > nextExpectedReplicasCheckExecutionTime); + assertTrue( + "currentReplicasCheckCTime: " + currentReplicasCheckCTime + + " should be lesser than nextExpectedReplicasCheckExecutionTime+bufferTimeInMsecs: " + + (nextExpectedReplicasCheckExecutionTime + bufferTimeInMsecs), + currentReplicasCheckCTime < (nextExpectedReplicasCheckExecutionTime + bufferTimeInMsecs)); + auditor.close(); + } + + @Test + public void testDelayBookieAuditOfCheckAllLedgers() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 
10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numBookieAuditsDelayed = + statsLogger.getCounter(ReplicationStats.NUM_BOOKIE_AUDITS_DELAYED); + TestOpStatsLogger underReplicatedLedgerTotalSizeStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.UNDER_REPLICATED_LEDGERS_TOTAL_SIZE); + + servConf.setAuditorPeriodicCheckInterval(1); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(Long.MAX_VALUE); + + urm.setLostBookieRecoveryDelay(Integer.MAX_VALUE); + + AtomicBoolean canRun = new AtomicBoolean(false); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, + false, statsLogger, canRun); + final CountDownLatch latch = auditor.getLatch(); + + auditor.start(); + + killBookie(addressByIndex(0)); + + Awaitility.await().untilAsserted(() -> assertEquals(1, (long) numBookieAuditsDelayed.get())); + final Future auditTask = auditor.auditTask; + assertTrue(auditTask != null && !auditTask.isDone()); + + canRun.set(true); + + assertTrue(latch.await(10, TimeUnit.SECONDS)); + assertTrue(auditor.auditTask.equals(auditTask) + && auditor.auditTask != null && !auditor.auditTask.isDone()); + // wrong num is numLedgers, right num is 0 + assertEquals("UNDER_REPLICATED_LEDGERS_TOTAL_SIZE", + 0, + 
underReplicatedLedgerTotalSizeStatsLogger.getSuccessCount()); + + auditor.close(); + } + + @Test + public void testDelayBookieAuditOfPlacementPolicy() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numBookieAuditsDelayed = + statsLogger.getCounter(ReplicationStats.NUM_BOOKIE_AUDITS_DELAYED); + TestOpStatsLogger placementPolicyCheckTime = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(1); + servConf.setAuditorPeriodicBookieCheckInterval(Long.MAX_VALUE); + + urm.setLostBookieRecoveryDelay(Integer.MAX_VALUE); + + AtomicBoolean canRun = new AtomicBoolean(false); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, + false, statsLogger, canRun); + final CountDownLatch latch = auditor.getLatch(); + + auditor.start(); + + killBookie(addressByIndex(0)); + + Awaitility.await().untilAsserted(() -> assertEquals(1, (long) numBookieAuditsDelayed.get())); + final Future auditTask = auditor.auditTask; + assertTrue(auditTask != null && !auditTask.isDone()); + assertEquals("PLACEMENT_POLICY_CHECK_TIME", 0, placementPolicyCheckTime.getSuccessCount()); + + 
canRun.set(true); + + assertTrue(latch.await(10, TimeUnit.SECONDS)); + assertTrue(auditor.auditTask.equals(auditTask) + && auditor.auditTask != null && !auditor.auditTask.isDone()); + // wrong successCount is > 0, right successCount is = 0 + assertEquals("PLACEMENT_POLICY_CHECK_TIME", 0, placementPolicyCheckTime.getSuccessCount()); + + auditor.close(); + } + + @Test + public void testDelayBookieAuditOfReplicasCheck() throws Exception { + for (AuditorElector e : auditorElectors.values()) { + e.shutdown(); + } + + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + Counter numBookieAuditsDelayed = + statsLogger.getCounter(ReplicationStats.NUM_BOOKIE_AUDITS_DELAYED); + TestOpStatsLogger replicasCheckTime = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME); + + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(Long.MAX_VALUE); + servConf.setAuditorPeriodicReplicasCheckInterval(1); + + urm.setLostBookieRecoveryDelay(Integer.MAX_VALUE); + + AtomicBoolean canRun = new AtomicBoolean(false); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, + false, statsLogger, canRun); + final CountDownLatch latch = auditor.getLatch(); + + auditor.start(); + + 
killBookie(addressByIndex(0)); + + Awaitility.await().untilAsserted(() -> assertEquals(1, (long) numBookieAuditsDelayed.get())); + final Future auditTask = auditor.auditTask; + assertTrue(auditTask != null && !auditTask.isDone()); + assertEquals("REPLICAS_CHECK_TIME", 0, replicasCheckTime.getSuccessCount()); + + canRun.set(true); + + assertTrue(latch.await(10, TimeUnit.SECONDS)); + assertTrue(auditor.auditTask.equals(auditTask) + && auditor.auditTask != null && !auditor.auditTask.isDone()); + // wrong successCount is > 0, right successCount is = 0 + assertEquals("REPLICAS_CHECK_TIME", 0, replicasCheckTime.getSuccessCount()); + + auditor.close(); + } + + static class TestAuditor extends Auditor { + + final AtomicReference latchRef = new AtomicReference(new CountDownLatch(1)); + + public TestAuditor(String bookieIdentifier, ServerConfiguration conf, BookKeeper bkc, boolean ownBkc, + StatsLogger statsLogger, AtomicBoolean exceptedRun) throws UnavailableException { + super(bookieIdentifier, conf, bkc, ownBkc, statsLogger); + renewAuditorTestWrapperTask(exceptedRun); + } + + public TestAuditor(String bookieIdentifier, ServerConfiguration conf, BookKeeper bkc, boolean ownBkc, + BookKeeperAdmin bkadmin, boolean ownadmin, StatsLogger statsLogger, + AtomicBoolean exceptedRun) throws UnavailableException { + super(bookieIdentifier, conf, bkc, ownBkc, bkadmin, ownadmin, statsLogger); + renewAuditorTestWrapperTask(exceptedRun); + } + + public TestAuditor(final String bookieIdentifier, ServerConfiguration conf, StatsLogger statsLogger, + AtomicBoolean exceptedRun) + throws UnavailableException { + super(bookieIdentifier, conf, statsLogger); + renewAuditorTestWrapperTask(exceptedRun); + } + + private void renewAuditorTestWrapperTask(AtomicBoolean exceptedRun) { + super.auditorCheckAllLedgersTask = + new AuditorTestWrapperTask(super.auditorCheckAllLedgersTask, latchRef, exceptedRun); + super.auditorPlacementPolicyCheckTask = + new 
AuditorTestWrapperTask(super.auditorPlacementPolicyCheckTask, latchRef, exceptedRun); + super.auditorReplicasCheckTask = + new AuditorTestWrapperTask(super.auditorReplicasCheckTask, latchRef, exceptedRun); + } + + CountDownLatch getLatch() { + return latchRef.get(); + } + + void setLatch(CountDownLatch latch) { + latchRef.set(latch); + } + + private static class AuditorTestWrapperTask extends AuditorTask { + private final AuditorTask innerTask; + private final AtomicReference latchRef; + private final AtomicBoolean exceptedRun; + + AuditorTestWrapperTask(AuditorTask innerTask, + AtomicReference latchRef, + AtomicBoolean exceptedRun) { + super(null, null, null, null, null, + null, null); + this.innerTask = innerTask; + this.latchRef = latchRef; + this.exceptedRun = exceptedRun; + } + + @Override + protected void runTask() { + if (exceptedRun == null || exceptedRun.get()) { + innerTask.runTask(); + latchRef.get().countDown(); + } + } + + @Override + public void shutdown() { + innerTask.shutdown(); + } + } + } + + private BookieId replaceBookieWithWriteFailingBookie(LedgerHandle lh) throws Exception { + int bookieIdx = -1; + Long entryId = lh.getLedgerMetadata().getAllEnsembles().firstKey(); + List curEnsemble = lh.getLedgerMetadata().getAllEnsembles().get(entryId); + + // Identify a bookie in the current ledger ensemble to be replaced + BookieId replacedBookie = null; + for (int i = 0; i < numBookies; i++) { + if (curEnsemble.contains(addressByIndex(i))) { + bookieIdx = i; + replacedBookie = addressByIndex(i); + break; + } + } + assertNotSame("Couldn't find ensemble bookie in bookie list", -1, bookieIdx); + + LOG.info("Killing bookie " + addressByIndex(bookieIdx)); + ServerConfiguration conf = killBookie(bookieIdx); + Bookie writeFailingBookie = new TestBookieImpl(conf) { + @Override + public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, + Object ctx, byte[] masterKey) + throws IOException, BookieException { + try { + LOG.info("Failing write 
to entry "); + // sleep a bit so that writes to other bookies succeed before + // the client hears about the failure on this bookie. If the + // client gets ack-quorum number of acks first, it won't care + // about any failures and won't reform the ensemble. + Thread.sleep(100); + throw new IOException(); + } catch (InterruptedException ie) { + // ignore, only interrupted if shutting down, + // and an exception would spam the logs + Thread.currentThread().interrupt(); + } + } + }; + startAndAddBookie(conf, writeFailingBookie); + return replacedBookie; + } + + /* + * Validates that the periodic ledger check will fix entries with a failed write. + */ + @Test + public void testFailedWriteRecovery() throws Exception { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + underReplicationManager.disableLedgerReplication(); + + LedgerHandle lh = bkc.createLedger(2, 2, 1, DigestType.CRC32, "passwd".getBytes()); + + // kill one of the bookies and replace it with one that rejects write; + // This way we get into the under replication state + BookieId replacedBookie = replaceBookieWithWriteFailingBookie(lh); + + // Write a few entries; this should cause under replication + byte[] data = "foobar".getBytes(); + data = "foobar".getBytes(); + lh.addEntry(data); + lh.addEntry(data); + lh.addEntry(data); + + lh.close(); + + // enable under replication detection and wait for it to report + // under replicated ledger + underReplicationManager.enableLedgerReplication(); + long underReplicatedLedger = -1; + for (int i = 0; i < 5; i++) { + underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate(); + if (underReplicatedLedger != -1) { + break; + } + Thread.sleep(CHECK_INTERVAL * 1000); + } + assertEquals("Ledger should be under replicated", lh.getId(), underReplicatedLedger); + + // now start the replication workers + List l = new ArrayList(); + for (int 
i = 0; i < numBookies; i++) { + ReplicationWorker rw = new ReplicationWorker(confByIndex(i), NullStatsLogger.INSTANCE); + rw.start(); + l.add(rw); + } + underReplicationManager.close(); + + // Wait for ensemble to change after replication + Thread.sleep(3000); + for (ReplicationWorker rw : l) { + rw.shutdown(); + } + + // check that ensemble has changed and the bookie that rejected writes has + // been replaced in the ensemble + LedgerHandle newLh = bkc.openLedger(lh.getId(), DigestType.CRC32, "passwd".getBytes()); + for (Map.Entry> e : + newLh.getLedgerMetadata().getAllEnsembles().entrySet()) { + List ensemble = e.getValue(); + assertFalse("Ensemble hasn't been updated", ensemble.contains(replacedBookie)); + } + newLh.close(); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTaskTest.java new file mode 100644 index 0000000000000..8b9c0b143028a --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTaskTest.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorPlacementPolicyCheckTask}. 
+ */ +public class AuditorPlacementPolicyCheckTaskTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorPlacementPolicyCheckTaskTest.class); + + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager ledgerUnderreplicationManager; + + public AuditorPlacementPolicyCheckTaskTest() throws Exception { + super(3); + baseConf.setPageLimit(1); + baseConf.setAutoRecoveryDaemonEnabled(false); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + baseClientConf.setMetadataServiceUri( + metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + final BookKeeper bookKeeper = new BookKeeper(baseClientConf); + admin = new BookKeeperAdmin(bookKeeper, NullStatsLogger.INSTANCE, new ClientConfiguration(baseClientConf)); + LedgerManagerFactory ledgerManagerFactory = bookKeeper.getLedgerManagerFactory(); + ledgerManager = ledgerManagerFactory.newLedgerManager(); + ledgerUnderreplicationManager = ledgerManagerFactory.newLedgerUnderreplicationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (ledgerManager != null) { + ledgerManager.close(); + } + if (ledgerUnderreplicationManager != null) { + ledgerUnderreplicationManager.close(); + } + super.tearDown(); + } + + @Test + public void testPlacementPolicyCheck() throws BKException, InterruptedException { + + // 1. create ledgers + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + // 2. 
init auditorPlacementPolicyCheckTask + final TestStatsProvider statsProvider = new TestStatsProvider(); + final TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + + AuditorPlacementPolicyCheckTask auditorPlacementPolicyCheckTask = new AuditorPlacementPolicyCheckTask( + baseConf, auditorStats, admin, ledgerManager, + ledgerUnderreplicationManager, null, (flag, throwable) -> flag.set(false)); + + // 3. placementPolicyCheck + auditorPlacementPolicyCheckTask.runTask(); + + // 4. verify + assertEquals("PLACEMENT_POLICY_CHECK_TIME", 1, ((TestStatsProvider.TestOpStatsLogger) + statsLogger.getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME)).getSuccessCount()); + assertEquals("numOfClosedLedgersAuditedInPlacementPolicyCheck", + numLedgers, + auditorPlacementPolicyCheckTask.getNumOfClosedLedgersAuditedInPlacementPolicyCheck().get()); + assertEquals("numOfLedgersFoundNotAdheringInPlacementPolicyCheck", + numLedgers, + auditorPlacementPolicyCheckTask.getNumOfLedgersFoundNotAdheringInPlacementPolicyCheck().get()); + } + +} \ No newline at end of file diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTest.java new file mode 100644 index 0000000000000..5637819a9275b --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorPlacementPolicyCheckTest.java @@ -0,0 +1,860 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicyImpl.REPP_DNS_RESOLVER_CLASS; +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.ZoneawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.api.DigestType; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.discover.BookieServiceInfo; +import org.apache.bookkeeper.discover.RegistrationManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.exceptions.MetadataException; +import 
org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.replication.AuditorPeriodicCheckTest.TestAuditor; +import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.Gauge; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestOpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.apache.bookkeeper.util.StaticDNSResolver; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.zookeeper.KeeperException; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Tests the logic of Auditor's PlacementPolicyCheck. + */ +public class AuditorPlacementPolicyCheckTest extends BookKeeperClusterTestCase { + private MetadataBookieDriver driver; + + public AuditorPlacementPolicyCheckTest() throws Exception { + super(1); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + StaticDNSResolver.reset(); + + URI uri = URI.create(confByIndex(0).getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver = MetadataDrivers.getBookieDriver(uri); + ServerConfiguration serverConfiguration = new ServerConfiguration(confByIndex(0)); + serverConfiguration.setMetadataServiceUri( + serverConfiguration.getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + 
driver.initialize(serverConfiguration, NullStatsLogger.INSTANCE); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (null != driver) { + driver.close(); + } + super.tearDown(); + } + + @Test + public void testPlacementPolicyCheckWithBookiesFromDifferentRacks() throws Exception { + int numOfBookies = 5; + List bookieAddresses = new ArrayList<>(); + BookieSocketAddress bookieAddress; + RegistrationManager regManager = driver.createRegistrationManager(); + // all the numOfBookies (5) are going to be in different racks + for (int i = 0; i < numOfBookies; i++) { + bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181); + StaticDNSResolver.addNodeToRack(bookieAddress.getHostName(), "/rack" + (i)); + bookieAddresses.add(bookieAddress.toBookieId()); + regManager.registerBookie(bookieAddress.toBookieId(), false, BookieServiceInfo.EMPTY); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 4; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 4; + Collections.shuffle(bookieAddresses); + + // closed ledger + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + + Collections.shuffle(bookieAddresses); + ensembleSize = 4; + // closed ledger with multiple segments + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 4)) + .newEnsembleEntry(20L, bookieAddresses.subList(1, 5)) + 
.newEnsembleEntry(60L, bookieAddresses.subList(0, 4)) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + + Collections.shuffle(bookieAddresses); + // non-closed ledger + initMeta = LedgerMetadataBuilder.create() + .withId(3L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 4)) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(3L, initMeta).get(); + + Collections.shuffle(bookieAddresses); + // non-closed ledger with multiple segments + initMeta = LedgerMetadataBuilder.create() + .withId(4L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 4)) + .newEnsembleEntry(20L, bookieAddresses.subList(1, 5)) + .newEnsembleEntry(60L, bookieAddresses.subList(0, 4)) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(4L, initMeta).get(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + /* + * since all of the bookies are in different racks, there shouldn't be any ledger not adhering + * to placement policy. 
+ */ + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", 0, + ledgersNotAdheringToPlacementPolicyGuage.getSample()); + /* + * since all of the bookies are in different racks, there shouldn't be any ledger softly adhering + * to placement policy. + */ + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", 0, + ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPlacementPolicy() throws Exception { + int numOfBookies = 5; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // only three racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack2"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 3; + + /* + * this closed ledger doesn't adhere to placement policy because there are only + * 3 racks, and the ensembleSize is 5. 
+ */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + /* + * this is non-closed ledger, so it shouldn't count as ledger not + * adhering to placement policy + */ + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); 
+ } + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPlacementPolicyAndNotMarkToUnderreplication() + throws Exception { + int numOfBookies = 5; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // only three racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack2"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 3; + + /* + * this closed ledger doesn't adhere to placement policy because there are only + * 3 racks, and the ensembleSize is 5. 
+ */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + LedgerUnderreplicationManager underreplicationManager = mFactory.newLedgerUnderreplicationManager(); + long unnderReplicateLedgerId = underreplicationManager.pollLedgerToRereplicate(); + assertEquals(unnderReplicateLedgerId, -1); + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPlacementPolicyAndMarkToUnderreplication() + throws Exception { + int numOfBookies = 5; + int 
numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // only three racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack2"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 3; + + /* + * this closed ledger doesn't adhere to placement policy because there are only + * 3 racks, and the ensembleSize is 5. 
+ */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + servConf.setRepairedPlacementPolicyNotAdheringBookieEnable(true); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + LedgerUnderreplicationManager underreplicationManager = mFactory.newLedgerUnderreplicationManager(); + long unnderReplicateLedgerId = underreplicationManager.pollLedgerToRereplicate(); + assertEquals(unnderReplicateLedgerId, 1L); + } + + @Test + public void testPlacementPolicyCheckForURLedgersElapsedRecoveryGracePeriod() throws Exception { + 
testPlacementPolicyCheckWithURLedgers(true); + } + + @Test + public void testPlacementPolicyCheckForURLedgersNotElapsedRecoveryGracePeriod() throws Exception { + testPlacementPolicyCheckWithURLedgers(false); + } + + public void testPlacementPolicyCheckWithURLedgers(boolean timeElapsed) throws Exception { + int numOfBookies = 4; + /* + * in timeElapsed=true scenario, set some low value, otherwise set a + * high value. + */ + int underreplicatedLedgerRecoveryGracePeriod = timeElapsed ? 1 : 1000; + int numOfURLedgersElapsedRecoveryGracePeriod = 0; + List bookieAddresses = new ArrayList(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + LedgerUnderreplicationManager underreplicationManager = mFactory.newLedgerUnderreplicationManager(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + + long ledgerId1 = 1L; + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(ledgerId1) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(ledgerId1, initMeta).get(); + underreplicationManager.markLedgerUnderreplicated(ledgerId1, bookieAddresses.get(0).toString()); + if (timeElapsed) { + numOfURLedgersElapsedRecoveryGracePeriod++; + } + + /* + * this is a non-closed ledger; it should also be reported as + * URLedgersElapsedRecoveryGracePeriod + */ + ensembleSize = 3; + long ledgerId2 = 
21234561L; + initMeta = LedgerMetadataBuilder.create() + .withId(ledgerId2) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, + Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2))) + .newEnsembleEntry(100L, + Arrays.asList(bookieAddresses.get(3), bookieAddresses.get(1), bookieAddresses.get(2))) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(ledgerId2, initMeta).get(); + underreplicationManager.markLedgerUnderreplicated(ledgerId2, bookieAddresses.get(0).toString()); + if (timeElapsed) { + numOfURLedgersElapsedRecoveryGracePeriod++; + } + + /* + * this ledger is not marked underreplicated. + */ + long ledgerId3 = 31234561L; + initMeta = LedgerMetadataBuilder.create() + .withId(ledgerId3) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, + Arrays.asList(bookieAddresses.get(1), bookieAddresses.get(2), bookieAddresses.get(3))) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(ledgerId3, initMeta).get(); + + if (timeElapsed) { + /* + * in the timeElapsed=true scenario, by waiting for + * underreplicatedLedgerRecoveryGracePeriod, the recovery time must have + * elapsed. + */ + Thread.sleep((underreplicatedLedgerRecoveryGracePeriod + 1) * 1000); + } else { + /* + * in timeElapsed=false scenario, since + * underreplicatedLedgerRecoveryGracePeriod is set to some high + * value, there is no value in waiting. So just wait for some time + * and make sure urledgers are not reported as recovery-time elapsed + * urledgers. 
+ */ + Thread.sleep(5000); + } + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setUnderreplicatedLedgerRecoveryGracePeriod(underreplicatedLedgerRecoveryGracePeriod); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge underreplicatedLedgersElapsedRecoveryGracePeriodGuage = statsLogger + .getGauge(ReplicationStats.NUM_UNDERREPLICATED_LEDGERS_ELAPSED_RECOVERY_GRACE_PERIOD); + assertEquals("NUM_UNDERREPLICATED_LEDGERS_ELAPSED_RECOVERY_GRACE_PERIOD guage value", + numOfURLedgersElapsedRecoveryGracePeriod, + underreplicatedLedgersElapsedRecoveryGracePeriodGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + @Test + public void testPlacementPolicyCheckWithLedgersNotAdheringToPolicyWithMultipleSegments() throws Exception { + int numOfBookies = 7; + int numOfLedgersNotAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList<>(); + RegistrationManager regManager = driver.createRegistrationManager(); + for (int i = 0; i < numOfBookies; i++) { + BookieId bookieAddress = new BookieSocketAddress("98.98.98." 
+ i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + + // 7 bookies spread over only four racks + StaticDNSResolver.addNodeToRack("98.98.98.0", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.1", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.2", "/rack3"); + StaticDNSResolver.addNodeToRack("98.98.98.3", "/rack4"); + StaticDNSResolver.addNodeToRack("98.98.98.4", "/rack1"); + StaticDNSResolver.addNodeToRack("98.98.98.5", "/rack2"); + StaticDNSResolver.addNodeToRack("98.98.98.6", "/rack3"); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 5; + int ackQuorumSize = 2; + int minNumRacksPerWriteQuorumConfValue = 4; + + /* + * for this closed ledger, in each writeQuorumSize (5) there would be + * at least minNumRacksPerWriteQuorumConfValue (4) racks. So it won't be + * counted as ledgers not adhering to placement policy. + */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 5)) + .newEnsembleEntry(20L, bookieAddresses.subList(1, 6)) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + + /* + * for the second segment bookies are from /rack1, /rack2 and /rack3, + * which is < minNumRacksPerWriteQuorumConfValue (4). So it is not + * adhering to placement policy. + * + * also for the third segment bookies are from /rack1, /rack2 and /rack3, which + * is < minNumRacksPerWriteQuorumConfValue (4). So it is not adhering to + * placement policy. + * + * Though there are multiple segments not adhering to placement + * policy, it should be counted as a single ledger. 
+ */ + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(ensembleSize) + .withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize) + .newEnsembleEntry(0L, bookieAddresses.subList(0, 5)) + .newEnsembleEntry(20L, + Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2), + bookieAddresses.get(4), bookieAddresses.get(5))) + .newEnsembleEntry(40L, + Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2), + bookieAddresses.get(4), bookieAddresses.get(6))) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + setServerConfigPropertiesForRackPlacement(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY gauge value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY gauge value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + @Test + public void testZoneawarePlacementPolicyCheck() throws Exception { + int numOfBookies = 6; + int 
numOfLedgersNotAdheringToPlacementPolicy = 0; + int numOfLedgersSoftlyAdheringToPlacementPolicy = 0; + List bookieAddresses = new ArrayList(); + RegistrationManager regManager = driver.createRegistrationManager(); + /* + * 6 bookies - 3 zones and 2 uds + */ + for (int i = 0; i < numOfBookies; i++) { + BookieSocketAddress bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181); + bookieAddresses.add(bookieAddress.toBookieId()); + regManager.registerBookie(bookieAddress.toBookieId(), false, BookieServiceInfo.EMPTY); + String zone = "/zone" + (i % 3); + String upgradeDomain = "/ud" + (i % 2); + String networkLocation = zone + upgradeDomain; + StaticDNSResolver.addNodeToRack(bookieAddress.getHostName(), networkLocation); + } + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setDesiredNumZonesPerWriteQuorum(3); + servConf.setMinNumZonesPerWriteQuorum(2); + setServerConfigPropertiesForZonePlacement(servConf); + + /* + * this closed ledger adheres to ZoneAwarePlacementPolicy, since + * ensemble is spread across 3 zones and 2 UDs + */ + LedgerMetadata initMeta = LedgerMetadataBuilder.create() + .withId(1L) + .withEnsembleSize(6) + .withWriteQuorumSize(6) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(1L, initMeta).get(); + + /* + * this is non-closed ledger, so though ensemble is not adhering to + * placement policy (since ensemble is not multiple of writeQuorum), + * this shouldn't be reported + */ + initMeta = LedgerMetadataBuilder.create() + .withId(2L) + .withEnsembleSize(6) + .withWriteQuorumSize(5) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, bookieAddresses) + .withDigestType(DigestType.DUMMY) + .withPassword(new 
byte[0]) + .build(); + lm.createLedgerMetadata(2L, initMeta).get(); + + /* + * this is closed ledger, since ensemble is not multiple of writeQuorum, + * this ledger is not adhering to placement policy. + */ + initMeta = LedgerMetadataBuilder.create() + .withId(3L) + .withEnsembleSize(6) + .withWriteQuorumSize(5) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, bookieAddresses) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(3L, initMeta).get(); + numOfLedgersNotAdheringToPlacementPolicy++; + + /* + * this closed ledger adheres softly to ZoneAwarePlacementPolicy, since + * ensemble/writeQuorum of size 4 has spread across just + * minNumZonesPerWriteQuorum (2). + */ + List newEnsemble = new ArrayList(); + newEnsemble.add(bookieAddresses.get(0)); + newEnsemble.add(bookieAddresses.get(1)); + newEnsemble.add(bookieAddresses.get(3)); + newEnsemble.add(bookieAddresses.get(4)); + initMeta = LedgerMetadataBuilder.create() + .withId(4L) + .withEnsembleSize(4) + .withWriteQuorumSize(4) + .withAckQuorumSize(2) + .newEnsembleEntry(0L, newEnsemble) + .withClosedState() + .withLastEntryId(100) + .withLength(10000) + .withDigestType(DigestType.DUMMY) + .withPassword(new byte[0]) + .build(); + lm.createLedgerMetadata(4L, initMeta).get(); + numOfLedgersSoftlyAdheringToPlacementPolicy++; + + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + 
.getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + numOfLedgersSoftlyAdheringToPlacementPolicy, + ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + regManager.close(); + } + } + + private void setServerConfigPropertiesForRackPlacement(ServerConfiguration servConf) { + setServerConfigProperties(servConf, RackawareEnsemblePlacementPolicy.class.getName()); + } + + private void setServerConfigPropertiesForZonePlacement(ServerConfiguration servConf) { + setServerConfigProperties(servConf, ZoneawareEnsemblePlacementPolicy.class.getName()); + } + + private void setServerConfigProperties(ServerConfiguration servConf, String ensemblePlacementPolicyClass) { + servConf.setProperty(REPP_DNS_RESOLVER_CLASS, StaticDNSResolver.class.getName()); + servConf.setProperty(ClientConfiguration.ENSEMBLE_PLACEMENT_POLICY, ensemblePlacementPolicyClass); + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + servConf.setAuditorPeriodicReplicasCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(1000); + } + + private TestStatsLogger startAuditorAndWaitForPlacementPolicyCheck(ServerConfiguration servConf, + MutableObject auditorRef) throws MetadataException, CompatibilityException, KeeperException, + InterruptedException, UnavailableException, UnknownHostException { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger placementPolicyCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + + 
final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, + statsLogger, null); + auditorRef.setValue(auditor); + CountDownLatch latch = auditor.getLatch(); + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 0, placementPolicyCheckStatsLogger.getSuccessCount()); + urm.setPlacementPolicyCheckCTime(-1); + auditor.start(); + /* + * since placementPolicyCheckCTime is set to -1, placementPolicyCheck should be + * scheduled to run with no initialdelay + */ + assertTrue("placementPolicyCheck should have executed", latch.await(20, TimeUnit.SECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (placementPolicyCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 1, placementPolicyCheckStatsLogger.getSuccessCount()); + return statsLogger; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTaskTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTaskTest.java new file mode 100644 index 0000000000000..62162bd25f427 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTaskTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import java.util.LinkedList; +import java.util.List; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link AuditorReplicasCheckTask}. 
+ */ +public class AuditorReplicasCheckTaskTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(AuditorReplicasCheckTaskTest.class); + + private BookKeeperAdmin admin; + private LedgerManager ledgerManager; + private LedgerUnderreplicationManager ledgerUnderreplicationManager; + + public AuditorReplicasCheckTaskTest() throws Exception { + super(3); + baseConf.setPageLimit(1); + baseConf.setAutoRecoveryDaemonEnabled(false); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + baseClientConf.setMetadataServiceUri( + metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + final BookKeeper bookKeeper = new BookKeeper(baseClientConf); + admin = new BookKeeperAdmin(bookKeeper, NullStatsLogger.INSTANCE, new ClientConfiguration(baseClientConf)); + LedgerManagerFactory ledgerManagerFactory = bookKeeper.getLedgerManagerFactory(); + ledgerManager = ledgerManagerFactory.newLedgerManager(); + ledgerUnderreplicationManager = ledgerManagerFactory.newLedgerUnderreplicationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (ledgerManager != null) { + ledgerManager.close(); + } + if (ledgerUnderreplicationManager != null) { + ledgerUnderreplicationManager.close(); + } + if (admin != null) { + admin.close(); + } + super.tearDown(); + } + + @Test + public void testReplicasCheck() throws BKException, InterruptedException { + + // 1. 
create ledgers + final int numLedgers = 10; + List ids = new LinkedList(); + for (int i = 0; i < numLedgers; i++) { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, "passwd".getBytes()); + ids.add(lh.getId()); + for (int j = 0; j < 2; j++) { + lh.addEntry("testdata".getBytes()); + } + lh.close(); + } + + // 2. init auditorReplicasCheckTask + final TestStatsProvider statsProvider = new TestStatsProvider(); + final TestStatsProvider.TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + final AuditorStats auditorStats = new AuditorStats(statsLogger); + AuditorReplicasCheckTask auditorReplicasCheckTask = new AuditorReplicasCheckTask( + baseConf, auditorStats, admin, ledgerManager, + ledgerUnderreplicationManager, null, (flag, throwable) -> flag.set(false)); + + // 3. replicasCheck + auditorReplicasCheckTask.runTask(); + + // 4. verify + assertEquals("REPLICAS_CHECK_TIME", 1, ((TestStatsProvider.TestOpStatsLogger) + statsLogger.getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME)).getSuccessCount()); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTest.java new file mode 100644 index 0000000000000..2e9dbc158597d --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorReplicasCheckTest.java @@ -0,0 +1,936 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperAdmin; +import org.apache.bookkeeper.client.LedgerMetadataBuilder; +import org.apache.bookkeeper.client.api.DigestType; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.discover.BookieServiceInfo; +import org.apache.bookkeeper.discover.RegistrationManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.exceptions.MetadataException; +import 
org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.replication.AuditorPeriodicCheckTest.TestAuditor; +import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException; +import org.apache.bookkeeper.replication.ReplicationException.UnavailableException; +import org.apache.bookkeeper.stats.Gauge; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.stats.StatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestOpStatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.apache.bookkeeper.util.AvailabilityOfEntriesOfLedger; +import org.apache.bookkeeper.util.StaticDNSResolver; +import org.apache.commons.collections4.map.MultiKeyMap; +import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.zookeeper.KeeperException; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Tests the logic of Auditor's ReplicasCheck. 
+ */ +public class AuditorReplicasCheckTest extends BookKeeperClusterTestCase { + private MetadataBookieDriver driver; + private RegistrationManager regManager; + + public AuditorReplicasCheckTest() throws Exception { + super(1); + baseConf.setPageLimit(1); // to make it easy to push ledger out of cache + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + StaticDNSResolver.reset(); + + URI uri = URI.create(confByIndex(0).getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver = MetadataDrivers.getBookieDriver(uri); + ServerConfiguration serverConfiguration = new ServerConfiguration(confByIndex(0)); + serverConfiguration.setMetadataServiceUri( + serverConfiguration.getMetadataServiceUri().replaceAll("zk://", "metadata-store:") + .replaceAll("/ledgers", "")); + driver.initialize(serverConfiguration, NullStatsLogger.INSTANCE); + regManager = driver.createRegistrationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + if (null != regManager) { + regManager.close(); + } + if (null != driver) { + driver.close(); + } + super.tearDown(); + } + + private class TestBookKeeperAdmin extends BookKeeperAdmin { + + private final MultiKeyMap returnAvailabilityOfEntriesOfLedger; + private final MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger; + + public TestBookKeeperAdmin(BookKeeper bkc, StatsLogger statsLogger, + MultiKeyMap returnAvailabilityOfEntriesOfLedger, + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger) { + super(bkc, statsLogger, baseClientConf); + this.returnAvailabilityOfEntriesOfLedger = returnAvailabilityOfEntriesOfLedger; + this.errorReturnValueForGetAvailabilityOfEntriesOfLedger = + errorReturnValueForGetAvailabilityOfEntriesOfLedger; + } + + 
@Override + public CompletableFuture asyncGetListOfEntriesOfLedger( + BookieId address, long ledgerId) { + CompletableFuture futureResult = + new CompletableFuture(); + Integer errorReturnValue = errorReturnValueForGetAvailabilityOfEntriesOfLedger.get(address.toString(), + Long.toString(ledgerId)); + if (errorReturnValue != null) { + futureResult.completeExceptionally(BKException.create(errorReturnValue).fillInStackTrace()); + } else { + AvailabilityOfEntriesOfLedger availabilityOfEntriesOfLedger = returnAvailabilityOfEntriesOfLedger + .get(address.toString(), Long.toString(ledgerId)); + futureResult.complete(availabilityOfEntriesOfLedger); + } + return futureResult; + } + } + + private TestStatsLogger startAuditorAndWaitForReplicasCheck(ServerConfiguration servConf, + MutableObject auditorRef, + MultiKeyMap expectedReturnAvailabilityOfEntriesOfLedger, + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger) + throws MetadataException, CompatibilityException, KeeperException, InterruptedException, + UnavailableException, UnknownHostException { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestOpStatsLogger replicasCheckStatsLogger = (TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.REPLICAS_CHECK_TIME); + + final TestAuditor auditor = new TestAuditor(BookieImpl.getBookieId(servConf).toString(), servConf, bkc, true, + new TestBookKeeperAdmin(bkc, statsLogger, expectedReturnAvailabilityOfEntriesOfLedger, + errorReturnValueForGetAvailabilityOfEntriesOfLedger), + true, statsLogger, null); + auditorRef.setValue(auditor); + CountDownLatch latch = auditor.getLatch(); + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 0, replicasCheckStatsLogger.getSuccessCount()); + urm.setReplicasCheckCTime(-1); + 
auditor.start(); + /* + * since replicasCheckCTime is set to -1, replicasCheck should be + * scheduled to run with no initialdelay + */ + assertTrue("replicasCheck should have executed", latch.await(20, TimeUnit.SECONDS)); + for (int i = 0; i < 200; i++) { + Thread.sleep(100); + if (replicasCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("REPLICAS_CHECK_TIME SuccessCount", 1, replicasCheckStatsLogger.getSuccessCount()); + return statsLogger; + } + + private void setServerConfigProperties(ServerConfiguration servConf) { + servConf.setAuditorPeriodicCheckInterval(0); + servConf.setAuditorPeriodicBookieCheckInterval(0); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(0); + servConf.setAuditorPeriodicReplicasCheckInterval(1000); + } + + List addAndRegisterBookies(int numOfBookies) + throws BookieException { + BookieId bookieAddress; + List bookieAddresses = new ArrayList(); + for (int i = 0; i < numOfBookies; i++) { + bookieAddress = new BookieSocketAddress("98.98.98." 
+ i, 2181).toBookieId(); + bookieAddresses.add(bookieAddress); + regManager.registerBookie(bookieAddress, false, BookieServiceInfo.EMPTY); + } + return bookieAddresses; + } + + private void createClosedLedgerMetadata(LedgerManager lm, long ledgerId, int ensembleSize, int writeQuorumSize, + int ackQuorumSize, Map> segmentEnsembles, long lastEntryId, int length, + DigestType digestType, byte[] password) throws InterruptedException, ExecutionException { + LedgerMetadataBuilder ledgerMetadataBuilder = LedgerMetadataBuilder.create(); + ledgerMetadataBuilder.withId(ledgerId).withEnsembleSize(ensembleSize).withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize).withClosedState().withLastEntryId(lastEntryId).withLength(length) + .withDigestType(digestType).withPassword(password); + for (Map.Entry> mapEntry : segmentEnsembles.entrySet()) { + ledgerMetadataBuilder.newEnsembleEntry(mapEntry.getKey(), mapEntry.getValue()); + } + LedgerMetadata initMeta = ledgerMetadataBuilder.build(); + lm.createLedgerMetadata(ledgerId, initMeta).get(); + } + + private void createNonClosedLedgerMetadata(LedgerManager lm, long ledgerId, int ensembleSize, int writeQuorumSize, + int ackQuorumSize, Map> segmentEnsembles, DigestType digestType, + byte[] password) throws InterruptedException, ExecutionException { + LedgerMetadataBuilder ledgerMetadataBuilder = LedgerMetadataBuilder.create(); + ledgerMetadataBuilder.withId(ledgerId).withEnsembleSize(ensembleSize).withWriteQuorumSize(writeQuorumSize) + .withAckQuorumSize(ackQuorumSize).withDigestType(digestType).withPassword(password); + for (Map.Entry> mapEntry : segmentEnsembles.entrySet()) { + ledgerMetadataBuilder.newEnsembleEntry(mapEntry.getKey(), mapEntry.getValue()); + } + LedgerMetadata initMeta = ledgerMetadataBuilder.build(); + lm.createLedgerMetadata(ledgerId, initMeta).get(); + } + + private void runTestScenario(MultiKeyMap returnAvailabilityOfEntriesOfLedger, + MultiKeyMap 
errorReturnValueForGetAvailabilityOfEntriesOfLedger, + int expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + int expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + int expectedNumLedgersHavingLessThanWQReplicasOfAnEntry) throws Exception { + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + setServerConfigProperties(servConf); + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForReplicasCheck(servConf, auditorRef, + returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger); + checkReplicasCheckStats(statsLogger, expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + expectedNumLedgersHavingLessThanWQReplicasOfAnEntry); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + } + } + + private void checkReplicasCheckStats(TestStatsLogger statsLogger, + int expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + int expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + int expectedNumLedgersHavingLessThanWQReplicasOfAnEntry) { + Gauge numLedgersFoundHavingNoReplicaOfAnEntryGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_HAVING_NO_REPLICA_OF_AN_ENTRY); + Gauge numLedgersHavingLessThanAQReplicasOfAnEntryGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_HAVING_LESS_THAN_AQ_REPLICAS_OF_AN_ENTRY); + Gauge numLedgersHavingLessThanWQReplicasOfAnEntryGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_HAVING_LESS_THAN_WQ_REPLICAS_OF_AN_ENTRY); + + assertEquals("NUM_LEDGERS_HAVING_NO_REPLICA_OF_AN_ENTRY guage value", + expectedNumLedgersFoundHavingNoReplicaOfAnEntry, + numLedgersFoundHavingNoReplicaOfAnEntryGuage.getSample()); + assertEquals("NUM_LEDGERS_HAVING_LESS_THAN_AQ_REPLICAS_OF_AN_ENTRY guage value", + expectedNumLedgersHavingLessThanAQReplicasOfAnEntry, + numLedgersHavingLessThanAQReplicasOfAnEntryGuage.getSample()); + 
assertEquals("NUM_LEDGERS_HAVING_LESS_THAN_WQ_REPLICAS_OF_AN_ENTRY guage value", + expectedNumLedgersHavingLessThanWQReplicasOfAnEntry, + numLedgersHavingLessThanWQReplicasOfAnEntryGuage.getSample()); + } + + /* + * For all the ledgers and for all the bookies, + * asyncGetListOfEntriesOfLedger would return + * BookieHandleNotAvailableException, so these ledgers wouldn't be counted + * against expectedNumLedgersFoundHavingNoReplicaOfAnEntry / + * LessThanAQReplicasOfAnEntry / LessThanWQReplicasOfAnEntry. + */ + @Test + public void testReplicasCheckForBookieHandleNotAvailable() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 4; + int ackQuorumSize = 2; + long lastEntryId = 100; + int length = 10000; + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + /* + * closed ledger + * + * for this ledger, for all the bookies we are setting + * errorReturnValueForGetAvailabilityOfEntriesOfLedger to + * BookieHandleNotAvailableException so asyncGetListOfEntriesOfLedger will + * return BookieHandleNotAvailableException. 
+ */ + Map> segmentEnsembles = new LinkedHashMap>(); + segmentEnsembles.put(0L, bookieAddresses); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + ensembleSize = 4; + /* + * closed ledger with multiple segments + * + * for this ledger, for all the bookies we are setting + * errorReturnValueForGetAvailabilityOfEntriesOfLedger to + * BookieHandleNotAvailableException so asyncGetListOfEntriesOfLedger will + * return BookieHandleNotAvailableException. + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(20L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(60L, bookieAddresses.subList(0, 4)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + /* + * non-closed ledger + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 3L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + /* + * non-closed ledger with multiple segments + * + */ + 
segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(20L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(60L, bookieAddresses.subList(0, 4)); + ledgerId = 4L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + } + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, 0, 0, + 0); + } + + /* + * In this testscenario all the ledgers have a missing entry. So all closed + * ledgers should be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry. + */ + @Test + public void testReplicasCheckForLedgersFoundHavingNoReplica() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + int ensembleSize = 5; + int writeQuorumSize = 4; + int ackQuorumSize = 2; + long lastEntryId = 100; + int length = 10000; + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingNoReplicaOfAnEntry = 0; + + /* + * closed ledger + * + * for this ledger we are setting returnAvailabilityOfEntriesOfLedger to + * Empty one for all of the bookies, so this ledger would be counted in + * ledgersFoundHavingNoReplicaOfAnEntry . 
+ */ + Map> segmentEnsembles = new LinkedHashMap>(); + segmentEnsembles.put(0L, bookieAddresses); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + returnAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + } + numLedgersFoundHavingNoReplicaOfAnEntry++; + + ensembleSize = 4; + /* + * closed ledger with multiple segments + * + * for this ledger we are setting + * errorReturnValueForGetAvailabilityOfEntriesOfLedger to + * NoSuchLedgerExistsException. This is equivalent to + * EMPTY_AVAILABILITYOFENTRIESOFLEDGER. So this ledger would be counted + * in ledgersFoundHavingNoReplicaOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(20L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(60L, bookieAddresses.subList(0, 4)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), + Long.toString(ledgerId), BKException.Code.NoSuchLedgerExistsException); + } + numLedgersFoundHavingNoReplicaOfAnEntry++; + + /* + * non-closed ledger + * + * since this is non-closed ledger, it should not be counted in + * ledgersFoundHavingNoReplicaOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 3L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + 
returnAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + } + + ensembleSize = 3; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + /* + * closed ledger + * + * for this ledger we are setting returnAvailabilityOfEntriesOfLedger to + * just {0l} for all of the bookies and entry 1l is missing for all of + * the bookies, so this ledger would be counted in + * ledgersFoundHavingNoReplicaOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 3)); + ledgerId = 4L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + for (BookieId bookieSocketAddress : bookieAddresses) { + returnAvailabilityOfEntriesOfLedger.put(bookieSocketAddress.toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0L })); + } + numLedgersFoundHavingNoReplicaOfAnEntry++; + + /* + * For this closed ledger, entry 1 is missing. So it should be counted + * towards numLedgersFoundHavingNoReplicaOfAnEntry. 
+ */ + ensembleSize = 4; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 3; + length = 10000; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 5L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2, 3 })); + numLedgersFoundHavingNoReplicaOfAnEntry++; + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, + numLedgersFoundHavingNoReplicaOfAnEntry, 0, 0); + } + + /* + * In this testscenario all the ledgers have an entry with less than AQ + * number of copies. So all closed ledgers should be counted towards + * numLedgersFoundHavingLessThanAQReplicasOfAnEntry. 
+ */ + @Test + public void testReplicasCheckForLedgersFoundHavingLessThanAQReplicasOfAnEntry() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingLessThanAQReplicasOfAnEntry = 0; + + /* + * closed ledger + * + * for this ledger there is only one copy of entry 2, so this ledger + * would be counted towards + * ledgersFoundHavingLessThanAQReplicasOfAnEntry. + */ + Map> segmentEnsembles = new LinkedHashMap>(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + long lastEntryId = 3; + int length = 10000; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2, 3 })); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + /* + * closed ledger with multiple segments. 
+ * + * for this ledger there is only one copy of entry 2, so this ledger + * would be counted towards + * ledgersFoundHavingLessThanAQReplicasOfAnEntry. + * + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] {})); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 3 })); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + /* + * closed ledger with multiple segments + * + * for this ledger entry 2 is overrreplicated, but it has only one copy + * in the set of bookies it is supposed to be. So it should be counted + * towards ledgersFoundHavingLessThanAQReplicasOfAnEntry. 
+ */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 3L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 3 })); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + /* + * non-closed ledger + * + * since this is non-closed ledger, it should not be counted towards + * ledgersFoundHavingLessThanAQReplicasOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 4L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] {})); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + 
returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 3 })); + + /* + * this is closed ledger. + * + * For third bookie, asyncGetListOfEntriesOfLedger will fail with + * BookieHandleNotAvailableException, so this should not be counted + * against missing copies of an entry. Other than that, for both entries + * 0 and 1, two copies are missing. Hence this should be counted towards + * numLedgersFoundHavingLessThanAQReplicasOfAnEntry. + */ + ensembleSize = 3; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 3)); + ledgerId = 5L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + numLedgersFoundHavingLessThanAQReplicasOfAnEntry++; + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, 0, + numLedgersFoundHavingLessThanAQReplicasOfAnEntry, 0); + } + + /* + * In this testscenario all the 
ledgers have an entry with less than WQ + * number of copies but greater than AQ. So all closed ledgers should be + * counted towards numLedgersFoundHavingLessThanWQReplicasOfAnEntry. + */ + @Test + public void testReplicasCheckForLedgersFoundHavingLessThanWQReplicasOfAnEntry() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingLessThanWQReplicasOfAnEntry = 0; + + /* + * closed ledger + * + * for this ledger a copy of entry 3, so this ledger would be counted + * towards ledgersFoundHavingLessThanWQReplicasOfAnEntry. 
+ */ + Map> segmentEnsembles = new LinkedHashMap>(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + long lastEntryId = 3; + int length = 10000; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2, 3 })); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + /* + * closed ledger with multiple segments + * + * for this ledger a copy of entry 0 and entry 2 are missing, so this + * ledger would be counted towards + * ledgersFoundHavingLessThanWQReplicasOfAnEntry. 
+ */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] {})); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2, 3 })); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + /* + * non-closed ledger with multiple segments + * + * since this is non-closed ledger, it should not be counted towards + * ledgersFoundHavingLessThanWQReplicasOfAnEntry + */ + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(2L, bookieAddresses.subList(1, 5)); + ledgerId = 3L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), + Long.toString(ledgerId), BKException.Code.NoSuchLedgerExistsException); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + 
returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(4).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 2, 3 })); + + /* + * closed ledger. + * + * for this ledger entry 0 is overrreplicated, but a copy is missing in + * the set of bookies it is supposed to be. So it should be counted + * towards ledgersFoundHavingLessThanWQReplicasOfAnEntry. + */ + ensembleSize = 4; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + ledgerId = 4L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 3 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0 })); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + /* + * this is closed ledger. + * + * For third bookie, asyncGetListOfEntriesOfLedger will fail with + * BookieHandleNotAvailableException, so this should not be counted + * against missing copies of an entry. 
Other than that, for both entries + * 0 and 1, a copy is missing. Hence this should be counted towards + * numLedgersFoundHavingLessThanWQReplicasOfAnEntry. + */ + ensembleSize = 3; + writeQuorumSize = 3; + ackQuorumSize = 2; + lastEntryId = 1; + length = 1000; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 3)); + ledgerId = 5L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + AvailabilityOfEntriesOfLedger.EMPTY_AVAILABILITYOFENTRIESOFLEDGER); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + errorReturnValueForGetAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), + Long.toString(ledgerId), BKException.Code.BookieHandleNotAvailableException); + numLedgersFoundHavingLessThanWQReplicasOfAnEntry++; + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, 0, 0, + numLedgersFoundHavingLessThanWQReplicasOfAnEntry); + } + + /* + * In this testscenario all the ledgers have empty segments. 
+ */ + @Test + public void testReplicasCheckForLedgersWithEmptySegments() throws Exception { + int numOfBookies = 5; + MultiKeyMap returnAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + MultiKeyMap errorReturnValueForGetAvailabilityOfEntriesOfLedger = + new MultiKeyMap(); + List bookieAddresses = addAndRegisterBookies(numOfBookies); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerManager lm = mFactory.newLedgerManager(); + DigestType digestType = DigestType.DUMMY; + byte[] password = new byte[0]; + Collections.shuffle(bookieAddresses); + + int numLedgersFoundHavingNoReplicaOfAnEntry = 0; + int numLedgersFoundHavingLessThanAQReplicasOfAnEntry = 0; + int numLedgersFoundHavingLessThanWQReplicasOfAnEntry = 0; + + /* + * closed ledger. + * + * This closed Ledger has no entry. So it should not be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry/LessThanAQReplicasOfAnEntry + * /WQReplicasOfAnEntry. + */ + Map> segmentEnsembles = new LinkedHashMap>(); + int ensembleSize = 4; + int writeQuorumSize = 3; + int ackQuorumSize = 2; + long lastEntryId = -1L; + int length = 0; + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + long ledgerId = 1L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + + /* + * closed ledger with multiple segments. + * + * This ledger has empty last segment, but all the entries have + * writeQuorumSize number of copies, So it should not be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry/LessThanAQReplicasOfAnEntry/ + * WQReplicasOfAnEntry. 
+ */ + lastEntryId = 2; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(1, 5)); + ledgerId = 2L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2 })); + + /* + * Closed ledger with multiple segments. + * + * Segment0, Segment1, Segment3, Segment5 and Segment6 are empty. + * Entries from entryid 3 are missing. So it should be counted towards + * numLedgersFoundHavingNoReplicaOfAnEntry. 
+ */ + lastEntryId = 5; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(4L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put(4L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(1, 5)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(0, 4)); + ledgerId = 3L; + createClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + lastEntryId, length, digestType, password); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2 })); + numLedgersFoundHavingNoReplicaOfAnEntry++; + + /* + * non-closed ledger with multiple segments + * + * since this is non-closed ledger, it should not be counted towards + * ledgersFoundHavingLessThanWQReplicasOfAnEntry + */ + lastEntryId = 2; + segmentEnsembles.clear(); + segmentEnsembles.put(0L, bookieAddresses.subList(0, 4)); + segmentEnsembles.put(0L, bookieAddresses.subList(1, 5)); + segmentEnsembles.put((lastEntryId + 1), bookieAddresses.subList(1, 5)); + ledgerId = 4L; + createNonClosedLedgerMetadata(lm, ledgerId, ensembleSize, writeQuorumSize, ackQuorumSize, segmentEnsembles, + digestType, password); + 
returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(0).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(1).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(2).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 0, 1, 2 })); + returnAvailabilityOfEntriesOfLedger.put(bookieAddresses.get(3).toString(), Long.toString(ledgerId), + new AvailabilityOfEntriesOfLedger(new long[] { 1, 2 })); + + runTestScenario(returnAvailabilityOfEntriesOfLedger, errorReturnValueForGetAvailabilityOfEntriesOfLedger, + numLedgersFoundHavingNoReplicaOfAnEntry, numLedgersFoundHavingLessThanAQReplicasOfAnEntry, + numLedgersFoundHavingLessThanWQReplicasOfAnEntry); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorRollingRestartTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorRollingRestartTest.java new file mode 100644 index 0000000000000..3e5081ed0ef9d --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuditorRollingRestartTest.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithLedgerManagerFactory; +import static org.testng.AssertJUnit.assertEquals; +import com.google.common.util.concurrent.UncheckedExecutionException; +import lombok.Cleanup; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerAuditorManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.test.TestCallbacks; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test auditor behaviours during a rolling restart. 
+ */ +public class AuditorRollingRestartTest extends BookKeeperClusterTestCase { + + public AuditorRollingRestartTest() throws Exception { + super(3, 600); + // run the daemon within the bookie + setAutoRecoveryEnabled(true); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + @Override + protected void startBKCluster(String metadataServiceUri) throws Exception { + super.startBKCluster(metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + } + + /** + * Test no auditing during restart if disabled. + */ + @Test + public void testAuditingDuringRollingRestart() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + runFunctionWithLedgerManagerFactory( + confByIndex(0), + mFactory -> { + try { + testAuditingDuringRollingRestart(mFactory); + } catch (Exception e) { + throw new UncheckedExecutionException(e.getMessage(), e); + } + return null; + } + ); + } + + private void testAuditingDuringRollingRestart(LedgerManagerFactory mFactory) throws Exception { + final LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + + LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes()); + for (int i = 0; i < 10; i++) { + lh.asyncAddEntry("foobar".getBytes(), new TestCallbacks.AddCallbackFuture(i), null); + } + lh.addEntry("foobar".getBytes()); + lh.close(); + + assertEquals("shouldn't be anything under replicated", + underReplicationManager.pollLedgerToRereplicate(), -1); + underReplicationManager.disableLedgerReplication(); + + @Cleanup + LedgerAuditorManager lam = 
mFactory.newLedgerAuditorManager(); + BookieId auditor = lam.getCurrentAuditor(); + ServerConfiguration conf = killBookie(auditor); + Thread.sleep(2000); + startBookie(conf); + Thread.sleep(2000); // give it time to run + assertEquals("shouldn't be anything under replicated", -1, + underReplicationManager.pollLedgerToRereplicate()); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuthAutoRecoveryTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuthAutoRecoveryTest.java new file mode 100644 index 0000000000000..41e159b77714f --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AuthAutoRecoveryTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import org.apache.bookkeeper.auth.AuthCallbacks; +import org.apache.bookkeeper.auth.AuthToken; +import org.apache.bookkeeper.auth.ClientAuthProvider; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.proto.ClientConnectionPeer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * This test verifies the auditor bookie scenarios from the auth point-of-view. + */ +public class AuthAutoRecoveryTest extends BookKeeperClusterTestCase { + + private static final Logger LOG = LoggerFactory + .getLogger(AuthAutoRecoveryTest.class); + + public static final String TEST_AUTH_PROVIDER_PLUGIN_NAME = "TestAuthProviderPlugin"; + + private static String clientSideRole; + + private static class AuditorClientAuthInterceptorFactory + implements ClientAuthProvider.Factory { + + @Override + public String getPluginName() { + return TEST_AUTH_PROVIDER_PLUGIN_NAME; + } + + @Override + public void init(ClientConfiguration conf) { + clientSideRole = conf.getClientRole(); + } + + @Override + public ClientAuthProvider newProvider(ClientConnectionPeer addr, + final AuthCallbacks.GenericCallback completeCb) { + return new ClientAuthProvider() { + public void init(AuthCallbacks.GenericCallback cb) { + completeCb.operationComplete(BKException.Code.OK, null); + } + + public void process(AuthToken m, AuthCallbacks.GenericCallback cb) { + } + }; + } + } + + protected ServerConfiguration newServerConfiguration() throws Exception { + ServerConfiguration conf = super.newServerConfiguration(); + 
conf.setClientAuthProviderFactoryClass(AuditorClientAuthInterceptorFactory.class.getName()); + return conf; + } + + public AuthAutoRecoveryTest() { + super(6); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + /* + * test the client role of the auditor + */ + @Test + public void testAuthClientRole() throws Exception { + ServerConfiguration config = confByIndex(0); + assertEquals(AuditorClientAuthInterceptorFactory.class.getName(), config.getClientAuthProviderFactoryClass()); + AutoRecoveryMain main = new AutoRecoveryMain(config); + try { + main.start(); + Thread.sleep(500); + assertTrue("AuditorElector should be running", + main.auditorElector.isRunning()); + assertTrue("Replication worker should be running", + main.replicationWorker.isRunning()); + } finally { + main.shutdown(); + } + assertEquals(ClientConfiguration.CLIENT_ROLE_SYSTEM, clientSideRole); + } + +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AutoRecoveryMainTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AutoRecoveryMainTest.java new file mode 100644 index 0000000000000..1d741c551ddb9 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/AutoRecoveryMainTest.java @@ -0,0 +1,248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertTrue; +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.util.TestUtils; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.ZooKeeper; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test the AuditorPeer. + */ +public class AutoRecoveryMainTest extends BookKeeperClusterTestCase { + + public AutoRecoveryMainTest() throws Exception { + super(3); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + /** + * Test the startup of the auditorElector and RW. 
+ */ + @Test + public void testStartup() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + AutoRecoveryMain main = new AutoRecoveryMain(confByIndex(0)); + try { + main.start(); + Thread.sleep(500); + assertTrue("AuditorElector should be running", + main.auditorElector.isRunning()); + assertTrue("Replication worker should be running", + main.replicationWorker.isRunning()); + } finally { + main.shutdown(); + } + } + + /* + * Test the shutdown of all daemons + */ + @Test + public void testShutdown() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + AutoRecoveryMain main = new AutoRecoveryMain(confByIndex(0)); + main.start(); + Thread.sleep(500); + assertTrue("AuditorElector should be running", + main.auditorElector.isRunning()); + assertTrue("Replication worker should be running", + main.replicationWorker.isRunning()); + + main.shutdown(); + assertFalse("AuditorElector should not be running", + main.auditorElector.isRunning()); + assertFalse("Replication worker should not be running", + main.replicationWorker.isRunning()); + } + + /** + * Test that, if an autorecovery looses its ZK connection/session it will + * shutdown. + */ + @Test + public void testAutoRecoverySessionLoss() throws Exception { + confByIndex(0).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + confByIndex(1).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + confByIndex(2).setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + /* + * initialize three AutoRecovery instances. 
+ */ + AutoRecoveryMain main1 = new AutoRecoveryMain(confByIndex(0)); + AutoRecoveryMain main2 = new AutoRecoveryMain(confByIndex(1)); + AutoRecoveryMain main3 = new AutoRecoveryMain(confByIndex(2)); + + /* + * start main1, make sure all the components are started and main1 is + * the current Auditor + */ + PulsarMetadataClientDriver pulsarMetadataClientDriver1 = startAutoRecoveryMain(main1); + ZooKeeper zk1 = getZk(pulsarMetadataClientDriver1); + + // Wait until auditor gets elected + for (int i = 0; i < 10; i++) { + try { + if (main1.auditorElector.getCurrentAuditor() != null) { + break; + } else { + Thread.sleep(1000); + } + } catch (IOException e) { + Thread.sleep(1000); + } + } + BookieId currentAuditor = main1.auditorElector.getCurrentAuditor(); + assertNotNull(currentAuditor); + Auditor auditor1 = main1.auditorElector.getAuditor(); + assertEquals("Current Auditor should be AR1", currentAuditor, BookieImpl.getBookieId(confByIndex(0))); + Awaitility.waitAtMost(30, TimeUnit.SECONDS).untilAsserted(() -> { + assertNotNull(auditor1); + assertTrue("Auditor of AR1 should be running", auditor1.isRunning()); + }); + + + /* + * start main2 and main3 + */ + PulsarMetadataClientDriver pulsarMetadataClientDriver2 = startAutoRecoveryMain(main2); + ZooKeeper zk2 = getZk(pulsarMetadataClientDriver2); + + PulsarMetadataClientDriver pulsarMetadataClientDriver3 = startAutoRecoveryMain(main3); + ZooKeeper zk3 = getZk(pulsarMetadataClientDriver3); + + + /* + * make sure AR1 is still the current Auditor and AR2's and AR3's + * auditors are not running. 
+ */ + assertEquals("Current Auditor should still be AR1", currentAuditor, BookieImpl.getBookieId(confByIndex(0))); + Awaitility.await().untilAsserted(() -> { + assertTrue("AR2's Auditor should not be running", (main2.auditorElector.getAuditor() == null + || !main2.auditorElector.getAuditor().isRunning())); + assertTrue("AR3's Auditor should not be running", (main3.auditorElector.getAuditor() == null + || !main3.auditorElector.getAuditor().isRunning())); + }); + + + /* + * expire zk2 and zk1 sessions. + */ + zkUtil.expireSession(zk2); + zkUtil.expireSession(zk1); + + /* + * wait for some time for all the components of AR1 and AR2 are + * shutdown. + */ + for (int i = 0; i < 10; i++) { + if (!main1.auditorElector.isRunning() && !main1.replicationWorker.isRunning() + && !main1.isAutoRecoveryRunning() && !main2.auditorElector.isRunning() + && !main2.replicationWorker.isRunning() && !main2.isAutoRecoveryRunning()) { + break; + } + Thread.sleep(1000); + } + + /* + * the AR3 should be current auditor. + */ + currentAuditor = main3.auditorElector.getCurrentAuditor(); + assertEquals("Current Auditor should be AR3", currentAuditor, BookieImpl.getBookieId(confByIndex(2))); + Awaitility.await().untilAsserted(() -> { + assertNotNull(main3.auditorElector.getAuditor()); + assertTrue("Auditor of AR3 should be running", main3.auditorElector.getAuditor().isRunning()); + }); + + Awaitility.waitAtMost(100, TimeUnit.SECONDS).untilAsserted(() -> { + /* + * since AR3 is current auditor, AR1's auditor should not be running + * anymore. + */ + assertFalse("AR1's auditor should not be running", auditor1.isRunning()); + + /* + * components of AR2 and AR3 should not be running since zk1 and zk2 + * sessions are expired. 
+ */ + assertFalse("Elector1 should have shutdown", main1.auditorElector.isRunning()); + assertFalse("RW1 should have shutdown", main1.replicationWorker.isRunning()); + assertFalse("AR1 should have shutdown", main1.isAutoRecoveryRunning()); + assertFalse("Elector2 should have shutdown", main2.auditorElector.isRunning()); + assertFalse("RW2 should have shutdown", main2.replicationWorker.isRunning()); + assertFalse("AR2 should have shutdown", main2.isAutoRecoveryRunning()); + }); + + } + + /* + * start autoRecoveryMain and make sure all its components are running and + * myVote node is existing + */ + PulsarMetadataClientDriver startAutoRecoveryMain(AutoRecoveryMain autoRecoveryMain) throws Exception { + autoRecoveryMain.start(); + PulsarMetadataClientDriver pulsarMetadataClientDriver = (PulsarMetadataClientDriver) autoRecoveryMain.bkc + .getMetadataClientDriver(); + TestUtils.assertEventuallyTrue("autoRecoveryMain components should be running", + () -> autoRecoveryMain.auditorElector.isRunning() + && autoRecoveryMain.replicationWorker.isRunning() && autoRecoveryMain.isAutoRecoveryRunning()); + return pulsarMetadataClientDriver; + } + + private ZooKeeper getZk(PulsarMetadataClientDriver pulsarMetadataClientDriver) throws Exception { + PulsarLedgerManagerFactory pulsarLedgerManagerFactory = + (PulsarLedgerManagerFactory) pulsarMetadataClientDriver.getLedgerManagerFactory(); + Field field = pulsarLedgerManagerFactory.getClass().getDeclaredField("store"); + field.setAccessible(true); + ZKMetadataStore zkMetadataStore = (ZKMetadataStore) field.get(pulsarLedgerManagerFactory); + return zkMetadataStore.getZkClient(); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookKeeperClusterTestCase.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookKeeperClusterTestCase.java new file mode 100644 index 0000000000000..c681a1f0764ee --- /dev/null +++ 
b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookKeeperClusterTestCase.java @@ -0,0 +1,851 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * This file is derived from BookKeeperClusterTestCase from Apache BookKeeper + * http://bookkeeper.apache.org + */ + +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.util.BookKeeperConstants.AVAILABLE_NODE; +import static org.apache.pulsar.common.util.PortManager.nextLockedFreePort; +import static org.testng.Assert.assertFalse; +import com.google.common.base.Stopwatch; +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Method; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; +import 
org.apache.bookkeeper.bookie.Bookie; +import org.apache.bookkeeper.bookie.BookieException; +import org.apache.bookkeeper.client.BookKeeperTestClient; +import org.apache.bookkeeper.client.TestStatsProvider; +import org.apache.bookkeeper.common.allocator.PoolingPolicy; +import org.apache.bookkeeper.conf.AbstractConfiguration; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.conf.TestBKConfiguration; +import org.apache.bookkeeper.metastore.InMemoryMetaStore; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.BookieSocketAddress; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.bookkeeper.test.ServerTester; +import org.apache.bookkeeper.test.TmpDirs; +import org.apache.bookkeeper.test.ZooKeeperCluster; +import org.apache.bookkeeper.test.ZooKeeperClusterUtil; +import org.apache.pulsar.common.util.PortManager; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; +import org.awaitility.Awaitility; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterTest; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.BeforeTest; + +/** + * A class runs several bookie servers for testing. 
+ */ +public abstract class BookKeeperClusterTestCase { + + static final Logger LOG = LoggerFactory.getLogger(BookKeeperClusterTestCase.class); + + protected String testName; + + @BeforeMethod + public void handleTestMethodName(Method method) { + testName = method.getName(); + } + + // Metadata service related variables + protected final ZooKeeperCluster zkUtil; + protected ZooKeeper zkc; + protected String metadataServiceUri; + protected FaultInjectionMetadataStore metadataStore; + + // BookKeeper related variables + protected final TmpDirs tmpDirs = new TmpDirs(); + protected final List servers = new LinkedList<>(); + + protected int numBookies; + protected BookKeeperTestClient bkc; + protected boolean useUUIDasBookieId = true; + + /* + * Loopback interface is set as the listening interface and allowloopback is + * set to true in this server config. So bookies in this test process would + * bind to loopback address. + */ + protected final ServerConfiguration baseConf = TestBKConfiguration.newServerConfiguration(); + protected final ClientConfiguration baseClientConf = TestBKConfiguration.newClientConfiguration(); + + private boolean isAutoRecoveryEnabled; + protected ExecutorService executor; + private final List bookiePorts = new ArrayList<>(); + + SynchronousQueue asyncExceptions = new SynchronousQueue<>(); + protected void captureThrowable(Runnable c) { + try { + c.run(); + } catch (Throwable e) { + LOG.error("Captured error: ", e); + asyncExceptions.add(e); + } + } + + public BookKeeperClusterTestCase(int numBookies) { + this(numBookies, 120); + } + + public BookKeeperClusterTestCase(int numBookies, int testTimeoutSecs) { + this(numBookies, 1, testTimeoutSecs); + } + + public BookKeeperClusterTestCase(int numBookies, int numOfZKNodes, int testTimeoutSecs) { + this.numBookies = numBookies; + if (numOfZKNodes == 1) { + zkUtil = new ZooKeeperUtil(getLedgersRootPath()); + } else { + try { + zkUtil = new ZooKeeperClusterUtil(numOfZKNodes); + } catch (IOException | 
KeeperException | InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + @BeforeTest + public void setUp() throws Exception { + setUp(getLedgersRootPath()); + } + + protected void setUp(String ledgersRootPath) throws Exception { + LOG.info("Setting up test {}", getClass()); + InMemoryMetaStore.reset(); + setMetastoreImplClass(baseConf); + setMetastoreImplClass(baseClientConf); + executor = Executors.newCachedThreadPool(); + + Stopwatch sw = Stopwatch.createStarted(); + try { + // start zookeeper service + startZKCluster(); + // start bookkeeper service + this.metadataServiceUri = getMetadataServiceUri(ledgersRootPath); + startBKCluster(metadataServiceUri); + LOG.info("Setup testcase {} @ metadata service {} in {} ms.", + testName, metadataServiceUri, sw.elapsed(TimeUnit.MILLISECONDS)); + } catch (Exception e) { + LOG.error("Error setting up", e); + throw e; + } + } + + protected String getMetadataServiceUri(String ledgersRootPath) { + return zkUtil.getMetadataServiceUri(ledgersRootPath); + } + + private String getLedgersRootPath() { + return changeLedgerPath() + "/ledgers"; + } + + protected String changeLedgerPath() { + return ""; + } + + @AfterTest(alwaysRun = true) + public void tearDown() throws Exception { + boolean failed = false; + for (Throwable e : asyncExceptions) { + LOG.error("Got async exception: ", e); + failed = true; + } + assertFalse(failed, "Async failure"); + Stopwatch sw = Stopwatch.createStarted(); + LOG.info("TearDown"); + Exception tearDownException = null; + // stop bookkeeper service + try { + stopBKCluster(); + } catch (Exception e) { + LOG.error("Got Exception while trying to stop BKCluster", e); + tearDownException = e; + } + // stop zookeeper service + try { + // cleanup for metrics. 
+ metadataStore.close(); + stopZKCluster(); + } catch (Exception e) { + LOG.error("Got Exception while trying to stop ZKCluster", e); + tearDownException = e; + } + // cleanup temp dirs + try { + tmpDirs.cleanup(); + } catch (Exception e) { + LOG.error("Got Exception while trying to cleanupTempDirs", e); + tearDownException = e; + } + + executor.shutdownNow(); + + LOG.info("Tearing down test {} in {} ms.", testName, sw.elapsed(TimeUnit.MILLISECONDS)); + if (tearDownException != null) { + throw tearDownException; + } + } + + /** + * Start zookeeper cluster. + * + * @throws Exception + */ + protected void startZKCluster() throws Exception { + zkUtil.startCluster(); + zkc = zkUtil.getZooKeeperClient(); + metadataStore = new FaultInjectionMetadataStore( + MetadataStoreExtended.create(zkUtil.getZooKeeperConnectString(), + MetadataStoreConfig.builder().build())); + } + + /** + * Stop zookeeper cluster. + * + * @throws Exception + */ + protected void stopZKCluster() throws Exception { + zkUtil.killCluster(); + } + + /** + * Start cluster. Also, starts the auto recovery process for each bookie, if + * isAutoRecoveryEnabled is true. + * + * @throws Exception + */ + protected void startBKCluster(String metadataServiceUri) throws Exception { + baseConf.setMetadataServiceUri(metadataServiceUri); + baseClientConf.setMetadataServiceUri(metadataServiceUri); + baseClientConf.setAllocatorPoolingPolicy(PoolingPolicy.UnpooledHeap); + + if (numBookies > 0) { + bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); + } + + // Create Bookie Servers (B1, B2, B3) + for (int i = 0; i < numBookies; i++) { + bookiePorts.add(startNewBookie()); + } + } + + /** + * Stop cluster. Also, stops all the auto recovery processes for the bookie + * cluster, if isAutoRecoveryEnabled is true. 
+ * + * @throws Exception + */ + protected void stopBKCluster() throws Exception { + if (bkc != null) { + bkc.close(); + } + + for (ServerTester t : servers) { + t.shutdown(); + } + servers.clear(); + bookiePorts.removeIf(PortManager::releaseLockedPort); + } + + protected ServerConfiguration newServerConfiguration() throws Exception { + File f = tmpDirs.createNew("bookie", "test"); + + int port; + if (baseConf.isEnableLocalTransport() || !baseConf.getAllowEphemeralPorts()) { + port = nextLockedFreePort(); + } else { + port = 0; + } + return newServerConfiguration(port, f, new File[] { f }); + } + + protected ClientConfiguration newClientConfiguration() { + return new ClientConfiguration(baseConf); + } + + protected ServerConfiguration newServerConfiguration(int port, File journalDir, File[] ledgerDirs) { + ServerConfiguration conf = new ServerConfiguration(baseConf); + conf.setBookiePort(port); + conf.setJournalDirName(journalDir.getPath()); + String[] ledgerDirNames = new String[ledgerDirs.length]; + for (int i = 0; i < ledgerDirs.length; i++) { + ledgerDirNames[i] = ledgerDirs[i].getPath(); + } + conf.setLedgerDirNames(ledgerDirNames); + conf.setEnableTaskExecutionStats(true); + conf.setAllocatorPoolingPolicy(PoolingPolicy.UnpooledHeap); + return conf; + } + + protected void stopAllBookies() throws Exception { + stopAllBookies(true); + } + + protected void stopAllBookies(boolean shutdownClient) throws Exception { + for (ServerTester t : servers) { + t.shutdown(); + } + servers.clear(); + if (shutdownClient && bkc != null) { + bkc.close(); + bkc = null; + } + } + + protected String newMetadataServiceUri(String ledgersRootPath) { + return zkUtil.getMetadataServiceUri(ledgersRootPath); + } + + protected String newMetadataServiceUri(String ledgersRootPath, String type) { + return zkUtil.getMetadataServiceUri(ledgersRootPath, type); + } + + /** + * Get bookie address for bookie at index. 
+ */ + public BookieId getBookie(int index) throws Exception { + return servers.get(index).getServer().getBookieId(); + } + + protected List bookieAddresses() throws Exception { + List bookieIds = new ArrayList<>(); + for (ServerTester a : servers) { + bookieIds.add(a.getServer().getBookieId()); + } + return bookieIds; + } + + protected List bookieLedgerDirs() throws Exception { + return servers.stream() + .flatMap(t -> Arrays.stream(t.getConfiguration().getLedgerDirs())) + .collect(Collectors.toList()); + } + + protected List bookieJournalDirs() throws Exception { + return servers.stream() + .flatMap(t -> Arrays.stream(t.getConfiguration().getJournalDirs())) + .collect(Collectors.toList()); + } + + protected BookieId addressByIndex(int index) throws Exception { + return servers.get(index).getServer().getBookieId(); + } + + protected BookieServer serverByIndex(int index) throws Exception { + return servers.get(index).getServer(); + } + + protected ServerConfiguration confByIndex(int index) throws Exception { + return servers.get(index).getConfiguration(); + } + + private Optional byAddress(BookieId addr) throws UnknownHostException { + for (ServerTester s : servers) { + if (s.getServer().getBookieId().equals(addr)) { + return Optional.of(s); + } + } + return Optional.empty(); + } + + protected int indexOfServer(BookieServer b) throws Exception { + for (int i = 0; i < servers.size(); i++) { + if (servers.get(i).getServer().equals(b)) { + return i; + } + } + return -1; + } + + protected int lastBookieIndex() { + return servers.size() - 1; + } + + protected int bookieCount() { + return servers.size(); + } + + private OptionalInt indexByAddress(BookieId addr) throws UnknownHostException { + for (int i = 0; i < servers.size(); i++) { + if (addr.equals(servers.get(i).getServer().getBookieId())) { + return OptionalInt.of(i); + } + } + return OptionalInt.empty(); + } + + /** + * Get bookie configuration for bookie. 
+ */ + public ServerConfiguration getBkConf(BookieId addr) throws Exception { + return byAddress(addr).get().getConfiguration(); + } + + /** + * Kill a bookie by its socket address. Also, stops the autorecovery process + * for the corresponding bookie server, if isAutoRecoveryEnabled is true. + * + * @param addr + * Socket Address + * @return the configuration of killed bookie + * @throws InterruptedException + */ + public ServerConfiguration killBookie(BookieId addr) throws Exception { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + if (tester.get().autoRecovery != null + && tester.get().autoRecovery.getAuditor() != null + && tester.get().autoRecovery.getAuditor().isRunning()) { + LOG.warn("Killing bookie {} who is the current Auditor", addr); + } + servers.remove(tester.get()); + tester.get().shutdown(); + return tester.get().getConfiguration(); + } + return null; + } + + /** + * Set the bookie identified by its socket address to readonly. + * + * @param addr + * Socket Address + * @throws InterruptedException + */ + public void setBookieToReadOnly(BookieId addr) throws Exception { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + tester.get().getServer().getBookie().getStateManager().transitionToReadOnlyMode().get(); + } + } + + /** + * Kill a bookie by index. Also, stops the respective auto recovery process + * for this bookie, if isAutoRecoveryEnabled is true. + * + * @param index + * Bookie Index + * @return the configuration of killed bookie + * @throws InterruptedException + * @throws IOException + */ + public ServerConfiguration killBookie(int index) throws Exception { + ServerTester tester = servers.remove(index); + tester.shutdown(); + return tester.getConfiguration(); + } + + /** + * Kill bookie by index and verify that it's stopped. 
+ * + * @param index index of bookie to kill + * + * @return configuration of killed bookie + */ + public ServerConfiguration killBookieAndWaitForZK(int index) throws Exception { + ServerTester tester = servers.get(index); // IKTODO: this method is awful + ServerConfiguration ret = killBookie(index); + while (zkc.exists("/ledgers/" + AVAILABLE_NODE + "/" + + tester.getServer().getBookieId().toString(), false) != null) { + Thread.sleep(500); + } + return ret; + } + + /** + * Sleep a bookie. + * + * @param addr + * Socket Address + * @param seconds + * Sleep seconds + * @return Count Down latch which will be counted down just after sleep begins + * @throws InterruptedException + * @throws IOException + */ + public CountDownLatch sleepBookie(BookieId addr, final int seconds) + throws Exception { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + CountDownLatch latch = new CountDownLatch(1); + Thread sleeper = new Thread() { + @Override + public void run() { + try { + tester.get().getServer().suspendProcessing(); + LOG.info("bookie {} is asleep", tester.get().getAddress()); + latch.countDown(); + Thread.sleep(seconds * 1000); + tester.get().getServer().resumeProcessing(); + LOG.info("bookie {} is awake", tester.get().getAddress()); + } catch (Exception e) { + LOG.error("Error suspending bookie", e); + } + } + }; + sleeper.start(); + return latch; + } else { + throw new IOException("Bookie not found"); + } + } + + /** + * Sleep a bookie until I count down the latch. 
+ * + * @param addr + * Socket Address + * @param l + * Latch to wait on + * @throws InterruptedException + * @throws IOException + */ + public void sleepBookie(BookieId addr, final CountDownLatch l) + throws InterruptedException, IOException { + final CountDownLatch suspendLatch = new CountDownLatch(1); + sleepBookie(addr, l, suspendLatch); + suspendLatch.await(); + } + + public void sleepBookie(BookieId addr, final CountDownLatch l, final CountDownLatch suspendLatch) + throws InterruptedException, IOException { + Optional tester = byAddress(addr); + if (tester.isPresent()) { + BookieServer bookie = tester.get().getServer(); + LOG.info("Sleep bookie {}.", addr); + Thread sleeper = new Thread() { + @Override + public void run() { + try { + bookie.suspendProcessing(); + if (null != suspendLatch) { + suspendLatch.countDown(); + } + l.await(); + bookie.resumeProcessing(); + } catch (Exception e) { + LOG.error("Error suspending bookie", e); + } + } + }; + sleeper.start(); + } else { + throw new IOException("Bookie not found"); + } + } + + /** + * Restart bookie servers. Also restarts all the respective auto recovery + * process, if isAutoRecoveryEnabled is true. + * + * @throws InterruptedException + * @throws IOException + * @throws KeeperException + * @throws BookieException + */ + public void restartBookies() + throws Exception { + restartBookies(c -> c); + } + + /** + * Restart a bookie. Also restart the respective auto recovery process, + * if isAutoRecoveryEnabled is true. 
+ * + * @param addr + * @throws InterruptedException + * @throws IOException + * @throws KeeperException + * @throws BookieException + */ + public void restartBookie(BookieId addr) throws Exception { + OptionalInt toRemove = indexByAddress(addr); + if (toRemove.isPresent()) { + ServerConfiguration newConfig = killBookie(toRemove.getAsInt()); + Thread.sleep(1000); + startAndAddBookie(newConfig); + } else { + throw new IOException("Bookie not found"); + } + } + + public void restartBookies(Function reconfFunction) + throws Exception { + // shut down bookie server + List confs = new ArrayList<>(); + for (ServerTester server : servers) { + server.shutdown(); + confs.add(server.getConfiguration()); + } + servers.clear(); + Thread.sleep(1000); + // restart them to ensure we can't + for (ServerConfiguration conf : confs) { + // ensure the bookie port is loaded correctly + startAndAddBookie(reconfFunction.apply(conf)); + } + } + + /** + * Helper method to startup a new bookie server with the indicated port + * number. Also, starts the auto recovery process, if the + * isAutoRecoveryEnabled is set true. 
+ * + * @throws IOException + */ + public int startNewBookie() + throws Exception { + return startNewBookieAndReturnAddress().getPort(); + } + + public BookieSocketAddress startNewBookieAndReturnAddress() + throws Exception { + ServerConfiguration conf = newServerConfiguration(); + LOG.info("Starting new bookie on port: {}", conf.getBookiePort()); + return startAndAddBookie(conf).getServer().getLocalAddress(); + } + + public BookieId startNewBookieAndReturnBookieId() + throws Exception { + ServerConfiguration conf = newServerConfiguration(); + LOG.info("Starting new bookie on port: {}", conf.getBookiePort()); + return startAndAddBookie(conf).getServer().getBookieId(); + } + + protected ServerTester startAndAddBookie(ServerConfiguration conf) throws Exception { + ServerTester server = startBookie(conf); + servers.add(server); + return server; + } + + protected ServerTester startAndAddBookie(ServerConfiguration conf, Bookie b) throws Exception { + ServerTester server = startBookie(conf, b); + servers.add(server); + return server; + } + /** + * Helper method to startup a bookie server using a configuration object. + * Also, starts the auto recovery process if isAutoRecoveryEnabled is true. + * + * @param conf + * Server Configuration Object + * + */ + protected ServerTester startBookie(ServerConfiguration conf) + throws Exception { + ServerTester tester = new ServerTester(conf); + + if (bkc == null) { + bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); + } + + BookieId address = tester.getServer().getBookieId(); + Future waitForBookie = conf.isForceReadOnlyBookie() + ? 
bkc.waitForReadOnlyBookie(address) + : bkc.waitForWritableBookie(address); + + tester.getServer().start(); + + waitForBookie.get(30, TimeUnit.SECONDS); + LOG.info("New bookie '{}' has been created.", address); + + if (isAutoRecoveryEnabled()) { + tester.startAutoRecovery(); + } + + int port = conf.getBookiePort(); + + Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> { + while (zkc.exists("/ledgers/" + AVAILABLE_NODE + "/" + + tester.getServer().getBookieId().toString(), false) == null) { + Thread.sleep(100); + } + return true; + }); + bkc.readBookiesBlocking(); + + LOG.info("New bookie on port " + port + " has been created."); + + return tester; + } + + /** + * Start a bookie with the given bookie instance. Also, starts the auto + * recovery for this bookie, if isAutoRecoveryEnabled is true. + */ + protected ServerTester startBookie(ServerConfiguration conf, final Bookie b) + throws Exception { + ServerTester tester = new ServerTester(conf, b); + if (bkc == null) { + bkc = new BookKeeperTestClient(baseClientConf, new TestStatsProvider()); + } + BookieId address = tester.getServer().getBookieId(); + Future waitForBookie = conf.isForceReadOnlyBookie() + ? bkc.waitForReadOnlyBookie(address) + : bkc.waitForWritableBookie(address); + + tester.getServer().start(); + + waitForBookie.get(30, TimeUnit.SECONDS); + + if (isAutoRecoveryEnabled()) { + tester.startAutoRecovery(); + } + + int port = conf.getBookiePort(); + Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> + metadataStore.exists( + getLedgersRootPath() + "/available/" + address).join() + ); + bkc.readBookiesBlocking(); + + LOG.info("New bookie '{}' has been created.", address); + return tester; + } + + public void setMetastoreImplClass(AbstractConfiguration conf) { + conf.setMetastoreImplClass(InMemoryMetaStore.class.getName()); + } + + /** + * Flags used to enable/disable the auto recovery process. 
If it is enabled, + * starting the bookie server will starts the auto recovery process for that + * bookie. Also, stopping bookie will stops the respective auto recovery + * process. + * + * @param isAutoRecoveryEnabled + * Value true will enable the auto recovery process. Value false + * will disable the auto recovery process + */ + public void setAutoRecoveryEnabled(boolean isAutoRecoveryEnabled) { + this.isAutoRecoveryEnabled = isAutoRecoveryEnabled; + } + + /** + * Flag used to check whether auto recovery process is enabled/disabled. By + * default the flag is false. + * + * @return true, if the auto recovery is enabled. Otherwise return false. + */ + public boolean isAutoRecoveryEnabled() { + return isAutoRecoveryEnabled; + } + + /** + * Will starts the auto recovery process for the bookie servers. One auto + * recovery process per each bookie server, if isAutoRecoveryEnabled is + * enabled. + */ + public void startReplicationService() throws Exception { + for (ServerTester t : servers) { + t.startAutoRecovery(); + } + } + + /** + * Will stops all the auto recovery processes for the bookie cluster, if + * isAutoRecoveryEnabled is true. 
+ */ + public void stopReplicationService() throws Exception{ + for (ServerTester t : servers) { + t.stopAutoRecovery(); + } + } + + public Auditor getAuditor(int timeout, TimeUnit unit) throws Exception { + final long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, unit); + while (System.nanoTime() < timeoutAt) { + for (ServerTester t : servers) { + Auditor a = t.getAuditor(); + ReplicationWorker replicationWorker = t.getReplicationWorker(); + + // found a candidate Auditor + ReplicationWorker + if (a != null && a.isRunning() + && replicationWorker != null && replicationWorker.isRunning()) { + int deathWatchInterval = t.getConfiguration().getDeathWatchInterval(); + Thread.sleep(deathWatchInterval + 1000); + } + + // double check, because in the meantime AutoRecoveryDeathWatcher may have killed the + // AutoRecovery daemon + if (a != null && a.isRunning() + && replicationWorker != null && replicationWorker.isRunning()) { + LOG.info("Found Auditor Bookie {}", t.getServer().getBookieId()); + return a; + } + } + Thread.sleep(100); + } + throw new Exception("No auditor found"); + } + + /** + * Check whether the InetSocketAddress was created using a hostname or an IP + * address. Represent as 'hostname/IPaddress' if the InetSocketAddress was + * created using hostname. 
Represent as '/IPaddress' if the + * InetSocketAddress was created using an IPaddress + * + * @param bookieId id + * @return true if the address was created using an IP address, false if the + * address was created using a hostname + */ + public boolean isCreatedFromIp(BookieId bookieId) { + BookieSocketAddress addr = bkc.getBookieAddressResolver().resolve(bookieId); + return addr.getSocketAddress().toString().startsWith("/"); + } + + public void resetBookieOpLoggers() { + servers.forEach(t -> t.getStatsProvider().clear()); + } + + public TestStatsProvider getStatsProvider(BookieId addr) throws UnknownHostException { + return byAddress(addr).get().getStatsProvider(); + } + + public TestStatsProvider getStatsProvider(int index) throws Exception { + return servers.get(index).getStatsProvider(); + } + +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieAutoRecoveryTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieAutoRecoveryTest.java new file mode 100644 index 0000000000000..888303d3e665c --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieAutoRecoveryTest.java @@ -0,0 +1,651 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNull; +import static org.testng.AssertJUnit.assertTrue; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.SortedMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.BookKeeperTestClient; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.common.util.OrderedScheduler; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataClientDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.ZkLedgerUnderreplicationManager; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.proto.BookieServer; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.Watcher.Event.EventType; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Integration tests verifies the complete functionality of the + * Auditor-rereplication process: Auditor will publish the bookie failures, + * consequently 
ReplicationWorker will get the notifications and act on it. + */ +public class BookieAutoRecoveryTest extends BookKeeperClusterTestCase { + private static final Logger LOG = LoggerFactory + .getLogger(BookieAutoRecoveryTest.class); + private static final byte[] PASSWD = "admin".getBytes(); + private static final byte[] data = "TESTDATA".getBytes(); + private static final String openLedgerRereplicationGracePeriod = "3000"; // milliseconds + + private DigestType digestType; + private MetadataClientDriver metadataClientDriver; + private LedgerManagerFactory mFactory; + private LedgerUnderreplicationManager underReplicationManager; + private LedgerManager ledgerManager; + private OrderedScheduler scheduler; + + private final String underreplicatedPath = "/ledgers/underreplication/ledgers"; + + public BookieAutoRecoveryTest() throws Exception { + super(3); + + baseConf.setLedgerManagerFactoryClassName( + "org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + baseConf.setOpenLedgerRereplicationGracePeriod(openLedgerRereplicationGracePeriod); + baseConf.setRwRereplicateBackoffMs(500); + baseClientConf.setLedgerManagerFactoryClassName( + "org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + this.digestType = DigestType.MAC; + setAutoRecoveryEnabled(true); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + baseClientConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + + scheduler = OrderedScheduler.newSchedulerBuilder() + .name("test-scheduler") + .numThreads(1) + .build(); + + metadataClientDriver = 
MetadataDrivers.getClientDriver( + URI.create(baseClientConf.getMetadataServiceUri())); + metadataClientDriver.initialize( + baseClientConf, + scheduler, + NullStatsLogger.INSTANCE, + Optional.empty()); + + // initialize urReplicationManager + mFactory = metadataClientDriver.getLedgerManagerFactory(); + underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + ledgerManager = mFactory.newLedgerManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + + if (null != underReplicationManager) { + underReplicationManager.close(); + underReplicationManager = null; + } + if (null != ledgerManager) { + ledgerManager.close(); + ledgerManager = null; + } + if (null != metadataClientDriver) { + metadataClientDriver.close(); + metadataClientDriver = null; + } + if (null != scheduler) { + scheduler.shutdown(); + } + } + + /** + * Test verifies publish urLedger by Auditor and replication worker is + * picking up the entries and finishing the rereplication of open ledger. 
+ */ + @Test + public void testOpenLedgers() throws Exception { + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + LedgerHandle lh = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + BookieId replicaToKillAddr = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + final String urLedgerZNode = getUrLedgerZNode(lh); + ledgerReplicaIndex = getReplicaIndexInLedger(lh, replicaToKillAddr); + + CountDownLatch latch = new CountDownLatch(1); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + latch = new CountDownLatch(1); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + // starting the replication service, so that he will be able to act as + // target bookie + startNewBookie(); + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(0), ledgerReplicaIndex); + } + + /** + * Test verifies publish urLedger by Auditor and replication worker is + * picking up the entries and finishing the rereplication of closed ledgers. 
+ */ + @Test + public void testClosedLedgers() throws Exception { + List listOfReplicaIndex = new ArrayList(); + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + closeLedgers(listOfLedgerHandle); + LedgerHandle lhandle = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + BookieId replicaToKillAddr = lhandle.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + CountDownLatch latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + ledgerReplicaIndex = getReplicaIndexInLedger(lh, replicaToKillAddr); + listOfReplicaIndex.add(ledgerReplicaIndex); + assertNull("UrLedger already exists!", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + + // Again watching the urLedger znode to know the replication status + latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + String urLedgerZNode = getUrLedgerZNode(lh); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + } + + // starting the replication service, so that he will be able to act as + // target bookie + startNewBookie(); + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + + // waiting to finish replication + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + for (int index = 0; index < listOfLedgerHandle.size(); index++) { + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(index), 
+ listOfReplicaIndex.get(index)); + } + } + + /** + * Test stopping replica service while replication in progress. Considering + * when there is an exception will shutdown Auditor and RW processes. After + * restarting should be able to finish the re-replication activities + */ + @Test + public void testStopWhileReplicationInProgress() throws Exception { + int numberOfLedgers = 2; + List listOfReplicaIndex = new ArrayList(); + List listOfLedgerHandle = createLedgersAndAddEntries( + numberOfLedgers, 5); + closeLedgers(listOfLedgerHandle); + LedgerHandle handle = listOfLedgerHandle.get(0); + BookieId replicaToKillAddr = handle.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie:" + replicaToKillAddr); + + // Each ledger, there will be two events : create urLedger and after + // rereplication delete urLedger + CountDownLatch latch = new CountDownLatch(listOfLedgerHandle.size()); + for (int i = 0; i < listOfLedgerHandle.size(); i++) { + final String urLedgerZNode = getUrLedgerZNode(listOfLedgerHandle + .get(i)); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + int replicaIndexInLedger = getReplicaIndexInLedger( + listOfLedgerHandle.get(i), replicaToKillAddr); + listOfReplicaIndex.add(replicaIndexInLedger); + } + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + + // Again watching the urLedger znode to know the replication status + latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + String urLedgerZNode = getUrLedgerZNode(lh); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + } + + // starting the replication service, so that he will be able to act as + // target bookie + startNewBookie(); + int 
newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + while (true) { + if (latch.getCount() < numberOfLedgers || latch.getCount() <= 0) { + stopReplicationService(); + LOG.info("Latch Count is:" + latch.getCount()); + break; + } + // grace period to take breath + Thread.sleep(1000); + } + + startReplicationService(); + + LOG.info("Waiting to finish rereplication processes"); + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + for (int index = 0; index < listOfLedgerHandle.size(); index++) { + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(index), + listOfReplicaIndex.get(index)); + } + } + + /** + * Verify the published urledgers of deleted ledgers(those ledgers where + * deleted after publishing as urledgers by Auditor) should be cleared off + * by the newly selected replica bookie. 
+ */ + @Test + public void testNoSuchLedgerExists() throws Exception { + List listOfLedgerHandle = createLedgersAndAddEntries(2, 5); + CountDownLatch latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + assertNull("UrLedger already exists!", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + BookieId replicaToKillAddr = listOfLedgerHandle.get(0) + .getLedgerMetadata().getAllEnsembles() + .get(0L).get(0); + killBookie(replicaToKillAddr); + replicaToKillAddr = listOfLedgerHandle.get(0) + .getLedgerMetadata().getAllEnsembles() + .get(0L).get(0); + killBookie(replicaToKillAddr); + // waiting to publish urLedger znode by Auditor + latch.await(); + + latch = new CountDownLatch(listOfLedgerHandle.size()); + for (LedgerHandle lh : listOfLedgerHandle) { + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + + // delete ledgers + for (LedgerHandle lh : listOfLedgerHandle) { + bkc.deleteLedger(lh.getId()); + } + startNewBookie(); + + // waiting to delete published urledgers, since it doesn't exists + latch.await(); + + for (LedgerHandle lh : listOfLedgerHandle) { + assertNull("UrLedger still exists after rereplication", + watchUrLedgerNode(getUrLedgerZNode(lh), latch)); + } + } + + /** + * Test that if a empty ledger loses the bookie not in the quorum for entry 0, it will + * still be openable when it loses enough bookies to lose a whole quorum. 
+ */ + @Test + public void testEmptyLedgerLosesQuorumEventually() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 2, 2, DigestType.CRC32, PASSWD); + CountDownLatch latch = new CountDownLatch(1); + String urZNode = getUrLedgerZNode(lh); + watchUrLedgerNode(urZNode, latch); + + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(2); + LOG.info("Killing last bookie, {}, in ensemble {}", replicaToKill, + lh.getLedgerMetadata().getAllEnsembles().get(0L)); + killBookie(replicaToKill); + startNewBookie(); + + getAuditor(10, TimeUnit.SECONDS).submitAuditTask().get(); // ensure auditor runs + + assertTrue("Should be marked as underreplicated", latch.await(5, TimeUnit.SECONDS)); + latch = new CountDownLatch(1); + Stat s = watchUrLedgerNode(urZNode, latch); // should be marked as replicated + if (s != null) { + assertTrue("Should be marked as replicated", latch.await(15, TimeUnit.SECONDS)); + } + + replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(1); + LOG.info("Killing second bookie, {}, in ensemble {}", replicaToKill, + lh.getLedgerMetadata().getAllEnsembles().get(0L)); + killBookie(replicaToKill); + + getAuditor(10, TimeUnit.SECONDS).submitAuditTask().get(); // ensure auditor runs + + assertTrue("Should be marked as underreplicated", latch.await(5, TimeUnit.SECONDS)); + latch = new CountDownLatch(1); + s = watchUrLedgerNode(urZNode, latch); // should be marked as replicated + + startNewBookie(); + getAuditor(10, TimeUnit.SECONDS).submitAuditTask().get(); // ensure auditor runs + + if (s != null) { + assertTrue("Should be marked as replicated", latch.await(20, TimeUnit.SECONDS)); + } + + // should be able to open ledger without issue + bkc.openLedger(lh.getId(), DigestType.CRC32, PASSWD); + } + + /** + * Test verifies bookie recovery, the host (recorded via ipaddress in + * ledgermetadata). 
+ */ + @Test + public void testLedgerMetadataContainsIpAddressAsBookieID() + throws Exception { + stopBKCluster(); + bkc = new BookKeeperTestClient(baseClientConf); + // start bookie with useHostNameAsBookieID=false, as old bookie + ServerConfiguration serverConf1 = newServerConfiguration(); + // start 2 more bookies with useHostNameAsBookieID=true + ServerConfiguration serverConf2 = newServerConfiguration(); + serverConf2.setUseHostNameAsBookieID(true); + ServerConfiguration serverConf3 = newServerConfiguration(); + serverConf3.setUseHostNameAsBookieID(true); + startAndAddBookie(serverConf1); + startAndAddBookie(serverConf2); + startAndAddBookie(serverConf3); + + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + LedgerHandle lh = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + final SortedMap> ensembles = lh.getLedgerMetadata().getAllEnsembles(); + final List bkAddresses = ensembles.get(0L); + BookieId replicaToKillAddr = bkAddresses.get(0); + for (BookieId bookieSocketAddress : bkAddresses) { + if (!isCreatedFromIp(bookieSocketAddress)) { + replicaToKillAddr = bookieSocketAddress; + LOG.info("Kill bookie which has registered using hostname"); + break; + } + } + + final String urLedgerZNode = getUrLedgerZNode(lh); + ledgerReplicaIndex = getReplicaIndexInLedger(lh, replicaToKillAddr); + + CountDownLatch latch = new CountDownLatch(1); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + latch = new CountDownLatch(1); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + // starting the replication service, so that he will be able to act as + // target bookie + ServerConfiguration serverConf = 
newServerConfiguration(); + serverConf.setUseHostNameAsBookieID(false); + startAndAddBookie(serverConf); + + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(0), ledgerReplicaIndex); + + } + + /** + * Test verifies bookie recovery, the host (recorded via useHostName in + * ledgermetadata). + */ + @Test + public void testLedgerMetadataContainsHostNameAsBookieID() + throws Exception { + stopBKCluster(); + + bkc = new BookKeeperTestClient(baseClientConf); + // start bookie with useHostNameAsBookieID=false, as old bookie + ServerConfiguration serverConf1 = newServerConfiguration(); + // start 2 more bookies with useHostNameAsBookieID=true + ServerConfiguration serverConf2 = newServerConfiguration(); + serverConf2.setUseHostNameAsBookieID(true); + ServerConfiguration serverConf3 = newServerConfiguration(); + serverConf3.setUseHostNameAsBookieID(true); + startAndAddBookie(serverConf1); + startAndAddBookie(serverConf2); + startAndAddBookie(serverConf3); + + List listOfLedgerHandle = createLedgersAndAddEntries(1, 5); + LedgerHandle lh = listOfLedgerHandle.get(0); + int ledgerReplicaIndex = 0; + final SortedMap> ensembles = lh.getLedgerMetadata().getAllEnsembles(); + final List bkAddresses = ensembles.get(0L); + BookieId replicaToKillAddr = bkAddresses.get(0); + for (BookieId bookieSocketAddress : bkAddresses) { + if (isCreatedFromIp(bookieSocketAddress)) { + replicaToKillAddr = bookieSocketAddress; + LOG.info("Kill bookie which has registered using ipaddress"); + break; + } + } + + final String urLedgerZNode = getUrLedgerZNode(lh); + ledgerReplicaIndex = 
getReplicaIndexInLedger(lh, replicaToKillAddr); + + CountDownLatch latch = new CountDownLatch(1); + assertNull("UrLedger already exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + LOG.info("Killing Bookie :" + replicaToKillAddr); + killBookie(replicaToKillAddr); + + // waiting to publish urLedger znode by Auditor + latch.await(); + latch = new CountDownLatch(1); + LOG.info("Watching on urLedgerPath:" + urLedgerZNode + + " to know the status of rereplication process"); + assertNotNull("UrLedger doesn't exists!", + watchUrLedgerNode(urLedgerZNode, latch)); + + // creates new bkclient + bkc = new BookKeeperTestClient(baseClientConf); + // starting the replication service, so that he will be able to act as + // target bookie + ServerConfiguration serverConf = newServerConfiguration(); + serverConf.setUseHostNameAsBookieID(true); + startAndAddBookie(serverConf); + + int newBookieIndex = lastBookieIndex(); + BookieServer newBookieServer = serverByIndex(newBookieIndex); + + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to finish the replication of failed bookie : " + + replicaToKillAddr); + } + latch.await(); + + // grace period to update the urledger metadata in zookeeper + LOG.info("Waiting to update the urledger metadata in zookeeper"); + + verifyLedgerEnsembleMetadataAfterReplication(newBookieServer, + listOfLedgerHandle.get(0), ledgerReplicaIndex); + + } + + private int getReplicaIndexInLedger(LedgerHandle lh, BookieId replicaToKill) { + SortedMap> ensembles = lh.getLedgerMetadata().getAllEnsembles(); + int ledgerReplicaIndex = -1; + for (BookieId addr : ensembles.get(0L)) { + ++ledgerReplicaIndex; + if (addr.equals(replicaToKill)) { + break; + } + } + return ledgerReplicaIndex; + } + + private void verifyLedgerEnsembleMetadataAfterReplication( + BookieServer newBookieServer, LedgerHandle lh, + int ledgerReplicaIndex) throws Exception { + LedgerHandle openLedger = bkc + .openLedger(lh.getId(), digestType, PASSWD); + + BookieId inetSocketAddress = 
openLedger.getLedgerMetadata().getAllEnsembles().get(0L) + .get(ledgerReplicaIndex); + assertEquals("Rereplication has been failed and ledgerReplicaIndex :" + + ledgerReplicaIndex, newBookieServer.getBookieId(), + inetSocketAddress); + openLedger.close(); + } + + private void closeLedgers(List listOfLedgerHandle) + throws InterruptedException, BKException { + for (LedgerHandle lh : listOfLedgerHandle) { + lh.close(); + } + } + + private List createLedgersAndAddEntries(int numberOfLedgers, + int numberOfEntries) + throws InterruptedException, BKException { + List listOfLedgerHandle = new ArrayList( + numberOfLedgers); + for (int index = 0; index < numberOfLedgers; index++) { + LedgerHandle lh = bkc.createLedger(3, 3, digestType, PASSWD); + listOfLedgerHandle.add(lh); + for (int i = 0; i < numberOfEntries; i++) { + lh.addEntry(data); + } + } + return listOfLedgerHandle; + } + + private String getUrLedgerZNode(LedgerHandle lh) { + return ZkLedgerUnderreplicationManager.getUrLedgerZnode( + underreplicatedPath, lh.getId()); + } + + private Stat watchUrLedgerNode(final String znode, + final CountDownLatch latch) throws KeeperException, + InterruptedException { + return zkc.exists(znode, new Watcher() { + @Override + public void process(WatchedEvent event) { + if (event.getType() == EventType.NodeDeleted) { + LOG.info("Received Ledger rereplication completion event :" + + event.getType()); + latch.countDown(); + } + if (event.getType() == EventType.NodeCreated) { + LOG.info("Received urLedger publishing event :" + + event.getType()); + latch.countDown(); + } + } + }); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieLedgerIndexTest.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieLedgerIndexTest.java new file mode 100644 index 0000000000000..1d5cf868cce65 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/BookieLedgerIndexTest.java @@ -0,0 +1,247 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import org.apache.bookkeeper.client.BKException; +import org.apache.bookkeeper.client.BookKeeper.DigestType; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.LayoutManager; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.bookkeeper.PulsarLayoutManager; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import 
org.testng.annotations.Test; + +/** + * Tests verifies bookie vs ledger mapping generating by the BookieLedgerIndexer. + */ +public class BookieLedgerIndexTest extends BookKeeperClusterTestCase { + + // Depending on the taste, select the amount of logging + // by decommenting one of the two lines below + // private final static Logger LOG = Logger.getRootLogger(); + private static final Logger LOG = LoggerFactory + .getLogger(BookieLedgerIndexTest.class); + + private Random rng; // Random Number Generator + private ArrayList entries; // generated entries + private final DigestType digestType = DigestType.CRC32; + private int numberOfLedgers = 3; + private List ledgerList; + private LedgerManagerFactory newLedgerManagerFactory; + private LedgerManager ledgerManager; + + public BookieLedgerIndexTest() throws Exception { + this("org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + } + + BookieLedgerIndexTest(String ledgerManagerFactory) throws Exception { + super(3); + LOG.info("Running test case using ledger manager : " + + ledgerManagerFactory); + // set ledger manager name + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + public void setUp() throws Exception { + super.setUp(); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + rng = new Random(System.currentTimeMillis()); // Initialize the Random + // Number Generator + entries = new ArrayList(); // initialize the entries list + ledgerList = new ArrayList(3); + + String ledgersRoot = "/ledgers"; + String storeUri = metadataServiceUri.replaceAll("zk://", "").replaceAll("/ledgers", ""); + MetadataStoreExtended store = 
MetadataStoreExtended.create(storeUri, + MetadataStoreConfig.builder().fsyncEnable(false).build()); + LayoutManager layoutManager = new PulsarLayoutManager(store, ledgersRoot); + newLedgerManagerFactory = new PulsarLedgerManagerFactory(); + + ClientConfiguration conf = new ClientConfiguration(); + conf.setZkLedgersRootPath(ledgersRoot); + newLedgerManagerFactory.initialize(conf, layoutManager, 1); + ledgerManager = newLedgerManagerFactory.newLedgerManager(); + } + + @AfterMethod + public void tearDown() throws Exception { + super.tearDown(); + if (null != newLedgerManagerFactory) { + newLedgerManagerFactory.close(); + newLedgerManagerFactory = null; + } + if (null != ledgerManager) { + ledgerManager.close(); + ledgerManager = null; + } + } + + /** + * Verify the bookie-ledger mapping with minimum number of bookies and few + * ledgers. + */ + @Test + public void testSimpleBookieLedgerMapping() throws Exception { + + for (int i = 0; i < numberOfLedgers; i++) { + createAndAddEntriesToLedger().close(); + } + + BookieLedgerIndexer bookieLedgerIndex = new BookieLedgerIndexer( + ledgerManager); + + Map> bookieToLedgerIndex = bookieLedgerIndex + .getBookieToLedgerIndex(); + + assertEquals("Missed few bookies in the bookie-ledger mapping!", 3, + bookieToLedgerIndex.size()); + Collection> bk2ledgerEntry = bookieToLedgerIndex.values(); + for (Set ledgers : bk2ledgerEntry) { + assertEquals("Missed few ledgers in the bookie-ledger mapping!", 3, + ledgers.size()); + for (Long ledgerId : ledgers) { + assertTrue("Unknown ledger-bookie mapping", ledgerList + .contains(ledgerId)); + } + } + } + + /** + * Verify ledger index with failed bookies and throws exception. + */ + @SuppressWarnings("deprecation") +// @Test +// public void testWithoutZookeeper() throws Exception { +// // This test case is for ledger metadata that stored in ZooKeeper. As +// // far as MSLedgerManagerFactory, ledger metadata are stored in other +// // storage. 
So this test is not suitable for MSLedgerManagerFactory. +// if (newLedgerManagerFactory instanceof org.apache.bookkeeper.meta.MSLedgerManagerFactory) { +// return; +// } +// +// for (int i = 0; i < numberOfLedgers; i++) { +// createAndAddEntriesToLedger().close(); +// } +// +// BookieLedgerIndexer bookieLedgerIndex = new BookieLedgerIndexer( +// ledgerManager); +// stopZKCluster(); +// try { +// bookieLedgerIndex.getBookieToLedgerIndex(); +// fail("Must throw exception as zookeeper are not running!"); +// } catch (BKAuditException bkAuditException) { +// // expected behaviour +// } +// } + + /** + * Verify indexing with multiple ensemble reformation. + */ + @Test + public void testEnsembleReformation() throws Exception { + try { + LedgerHandle lh1 = createAndAddEntriesToLedger(); + LedgerHandle lh2 = createAndAddEntriesToLedger(); + + startNewBookie(); + shutdownBookie(lastBookieIndex() - 1); + + // add few more entries after ensemble reformation + for (int i = 0; i < 10; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(Integer.MAX_VALUE)); + entry.position(0); + + entries.add(entry.array()); + lh1.addEntry(entry.array()); + lh2.addEntry(entry.array()); + } + + BookieLedgerIndexer bookieLedgerIndex = new BookieLedgerIndexer( + ledgerManager); + + Map> bookieToLedgerIndex = bookieLedgerIndex + .getBookieToLedgerIndex(); + assertEquals("Missed few bookies in the bookie-ledger mapping!", 4, + bookieToLedgerIndex.size()); + Collection> bk2ledgerEntry = bookieToLedgerIndex.values(); + for (Set ledgers : bk2ledgerEntry) { + assertEquals( + "Missed few ledgers in the bookie-ledger mapping!", 2, + ledgers.size()); + for (Long ledgerNode : ledgers) { + assertTrue("Unknown ledger-bookie mapping", ledgerList + .contains(ledgerNode)); + } + } + } catch (BKException e) { + LOG.error("Test failed", e); + fail("Test failed due to BookKeeper exception"); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOG.error("Test 
failed", e); + fail("Test failed due to interruption"); + } + } + + private void shutdownBookie(int bkShutdownIndex) throws Exception { + killBookie(bkShutdownIndex); + } + + private LedgerHandle createAndAddEntriesToLedger() throws BKException, + InterruptedException { + int numEntriesToWrite = 20; + // Create a ledger + LedgerHandle lh = bkc.createLedger(digestType, "admin".getBytes()); + LOG.info("Ledger ID: " + lh.getId()); + for (int i = 0; i < numEntriesToWrite; i++) { + ByteBuffer entry = ByteBuffer.allocate(4); + entry.putInt(rng.nextInt(Integer.MAX_VALUE)); + entry.position(0); + + entries.add(entry.array()); + lh.addEntry(entry.array()); + } + ledgerList.add(lh.getId()); + return lh; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ReplicationTestUtil.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ReplicationTestUtil.java new file mode 100644 index 0000000000000..4360bf3254675 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ReplicationTestUtil.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.bookkeeper.replication; + +import java.util.List; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; + +/** + * Utility class for replication tests. + */ +public class ReplicationTestUtil { + + /** + * Checks whether ledger is in under-replication. + */ + public static boolean isLedgerInUnderReplication(ZooKeeper zkc, long id, + String basePath) throws KeeperException, InterruptedException { + List children; + try { + children = zkc.getChildren(basePath, true); + } catch (KeeperException.NoNodeException nne) { + return false; + } + + boolean isMatched = false; + for (String child : children) { + if (child.startsWith("urL") && child.contains(String.valueOf(id))) { + isMatched = true; + break; + } else { + String path = basePath + '/' + child; + try { + if (zkc.getChildren(path, false).size() > 0) { + isMatched = isLedgerInUnderReplication(zkc, id, path); + } + } catch (KeeperException.NoNodeException nne) { + return false; + } + } + + } + return isMatched; + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestAutoRecoveryAlongWithBookieServers.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestAutoRecoveryAlongWithBookieServers.java new file mode 100644 index 0000000000000..11797c8373715 --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestAutoRecoveryAlongWithBookieServers.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import java.util.Enumeration; +import java.util.List; +import java.util.Map.Entry; +import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.LedgerEntry; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.util.BookKeeperConstants; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test auto recovery. 
+ */ +public class TestAutoRecoveryAlongWithBookieServers extends + BookKeeperClusterTestCase { + + private String basePath = ""; + + public TestAutoRecoveryAlongWithBookieServers() throws Exception { + super(3); + setAutoRecoveryEnabled(true); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + basePath = BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH + '/' + + BookKeeperConstants.UNDER_REPLICATION_NODE + + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + } + + @Override + protected void startBKCluster(String metadataServiceUri) throws Exception { + super.startBKCluster(metadataServiceUri.replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + } + + /** + * Tests that the auto recovery service along with Bookie servers itself. 
+ */ + @Test + public void testAutoRecoveryAlongWithBookieServers() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + "testpasswd".getBytes()); + byte[] testData = "testBuiltAutoRecovery".getBytes(); + + for (int i = 0; i < 10; i++) { + lh.addEntry(testData); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), + basePath)) { + Thread.sleep(100); + } + + // Killing all bookies except newly replicated bookie + for (Entry> entry : + lh.getLedgerMetadata().getAllEnsembles().entrySet()) { + List bookies = entry.getValue(); + for (BookieId bookie : bookies) { + if (bookie.equals(newBkAddr)) { + continue; + } + killBookie(bookie); + } + } + + // Should be able to read the entries from 0-9 + LedgerHandle lhs = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, "testpasswd".getBytes()); + Enumeration entries = lhs.readEntries(0, 9); + assertTrue("Should have the elements", entries.hasMoreElements()); + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + assertEquals("testBuiltAutoRecovery", new String(entry.getEntry())); + } + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestReplicationWorker.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestReplicationWorker.java new file mode 100644 index 0000000000000..7938feaba19fe --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/TestReplicationWorker.java @@ -0,0 +1,1248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.replication; + +import static org.apache.bookkeeper.replication.ReplicationStats.AUDITOR_SCOPE; +import static org.apache.bookkeeper.replication.ReplicationStats.NUM_ENTRIES_UNABLE_TO_READ_FOR_REPLICATION; +import static org.apache.bookkeeper.replication.ReplicationStats.REPLICATION_SCOPE; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertFalse; +import static org.testng.AssertJUnit.assertNotNull; +import static org.testng.AssertJUnit.assertNull; +import static org.testng.AssertJUnit.assertTrue; +import static org.testng.AssertJUnit.fail; +import io.netty.util.HashedWheelTimer; +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.Enumeration; +import java.util.List; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Optional; +import java.util.TimerTask; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; +import lombok.Cleanup; +import org.apache.bookkeeper.bookie.BookieImpl; +import org.apache.bookkeeper.client.BKException; +import 
org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.client.BookKeeperTestClient; +import org.apache.bookkeeper.client.ClientUtil; +import org.apache.bookkeeper.client.EnsemblePlacementPolicy; +import org.apache.bookkeeper.client.LedgerEntry; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.ZoneawareEnsemblePlacementPolicy; +import org.apache.bookkeeper.client.api.LedgerMetadata; +import org.apache.bookkeeper.common.util.OrderedScheduler; +import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.conf.ServerConfiguration; +import org.apache.bookkeeper.feature.FeatureProvider; +import org.apache.bookkeeper.meta.LedgerManager; +import org.apache.bookkeeper.meta.LedgerManagerFactory; +import org.apache.bookkeeper.meta.LedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.MetadataBookieDriver; +import org.apache.bookkeeper.meta.MetadataClientDriver; +import org.apache.bookkeeper.meta.MetadataDrivers; +import org.apache.bookkeeper.meta.ZkLedgerUnderreplicationManager; +import org.apache.bookkeeper.meta.exceptions.MetadataException; +import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; +import org.apache.bookkeeper.net.BookieId; +import org.apache.bookkeeper.net.DNSToSwitchMapping; +import org.apache.bookkeeper.proto.BookieAddressResolver; +import org.apache.bookkeeper.replication.ReplicationException.CompatibilityException; +import org.apache.bookkeeper.stats.Counter; +import org.apache.bookkeeper.stats.Gauge; +import org.apache.bookkeeper.stats.NullStatsLogger; +import org.apache.bookkeeper.stats.StatsLogger; +import org.apache.bookkeeper.test.TestStatsProvider; +import org.apache.bookkeeper.test.TestStatsProvider.TestStatsLogger; +import org.apache.bookkeeper.util.BookKeeperConstants; +import org.apache.bookkeeper.util.StaticDNSResolver; +import org.apache.bookkeeper.zookeeper.ZooKeeperClient; 
+import org.apache.commons.lang3.mutable.MutableObject; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory; +import org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; +import org.awaitility.Awaitility; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Test the ReplicationWroker, where it has to replicate the fragments from + * failed Bookies to given target Bookie. + */ +public class TestReplicationWorker extends BookKeeperClusterTestCase { + + private static final byte[] TESTPASSWD = "testpasswd".getBytes(); + private static final Logger LOG = LoggerFactory + .getLogger(TestReplicationWorker.class); + private String basePath = ""; + private String baseLockPath = ""; + private MetadataBookieDriver driver; + private LedgerManagerFactory mFactory; + private LedgerUnderreplicationManager underReplicationManager; + private LedgerManager ledgerManager; + private static byte[] data = "TestReplicationWorker".getBytes(); + private OrderedScheduler scheduler; + private String zkLedgersRootPath; + + public TestReplicationWorker() throws Exception { + this("org.apache.pulsar.metadata.bookkeeper.PulsarLedgerManagerFactory"); + } + + TestReplicationWorker(String ledgerManagerFactory) throws Exception { + super(3, 300); + LOG.info("Running test case using ledger manager : " + + ledgerManagerFactory); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataClientDriver"); + Class.forName("org.apache.pulsar.metadata.bookkeeper.PulsarMetadataBookieDriver"); + // set ledger manager name + baseConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + 
baseClientConf.setLedgerManagerFactoryClassName(ledgerManagerFactory); + baseConf.setRereplicationEntryBatchSize(3); + baseConf.setZkTimeout(7000); + baseConf.setZkRetryBackoffMaxMs(500); + baseConf.setZkRetryBackoffStartMs(10); + } + + @BeforeMethod + @Override + public void setUp() throws Exception { + super.setUp(); + zkLedgersRootPath = ZKMetadataDriverBase.resolveZkLedgersRootPath(baseClientConf); + basePath = zkLedgersRootPath + '/' + + BookKeeperConstants.UNDER_REPLICATION_NODE + + BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH; + baseLockPath = zkLedgersRootPath + '/' + + BookKeeperConstants.UNDER_REPLICATION_NODE + + "/locks"; + baseClientConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + baseConf.setMetadataServiceUri( + zkUtil.getMetadataServiceUri().replaceAll("zk://", "metadata-store:").replaceAll("/ledgers", "")); + this.scheduler = OrderedScheduler.newSchedulerBuilder() + .name("test-scheduler") + .numThreads(1) + .build(); + + this.driver = MetadataDrivers.getBookieDriver( + URI.create(baseConf.getMetadataServiceUri())); + this.driver.initialize( + baseConf, + NullStatsLogger.INSTANCE); + // initialize urReplicationManager + mFactory = driver.getLedgerManagerFactory(); + ledgerManager = mFactory.newLedgerManager(); + underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + } + + @AfterMethod + @Override + public void tearDown() throws Exception { + super.tearDown(); + if (null != ledgerManager) { + ledgerManager.close(); + ledgerManager = null; + } + if (null != underReplicationManager) { + underReplicationManager.close(); + underReplicationManager = null; + } + if (null != driver) { + driver.close(); + } + if (null != scheduler) { + scheduler.shutdown(); + scheduler = null; + } + if (null != mFactory) { + mFactory.close(); + } + } + + /** + * Tests that replication worker should replicate the failed bookie + * fragments to target bookie given to the 
worker. + */ + @Test + public void testRWShouldReplicateFragmentsToTargetBookie() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + + ReplicationWorker rw = new ReplicationWorker(baseConf); + + rw.start(); + try { + + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + + killAllBookies(lh, newBkAddr); + + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + } finally { + rw.shutdown(); + } + } + + /** + * Tests that replication worker should retry for replication until enough + * bookies available for replication. 
+ */ + @Test + public void testRWShouldRetryUntilThereAreEnoughBksAvailableForReplication() + throws Exception { + LedgerHandle lh = bkc.createLedger(1, 1, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie : {}", replicaToKill); + ServerConfiguration killedBookieConfig = killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr :" + newBkAddr); + + killAllBookies(lh, newBkAddr); + ReplicationWorker rw = new ReplicationWorker(baseConf); + + rw.start(); + try { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + int counter = 30; + while (counter-- > 0) { + assertTrue("Expecting that replication should not complete", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)); + Thread.sleep(100); + } + // restart killed bookie + startAndAddBookie(killedBookieConfig); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + } finally { + rw.shutdown(); + } + } + + /** + * Tests that replication worker1 should take one fragment replication and + * other replication worker also should compete for the replication. 
+ */ + @Test + public void test2RWsShouldCompeteForReplicationOf2FragmentsAndCompleteReplication() + throws Exception { + LedgerHandle lh = bkc.createLedger(2, 2, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie : {}", replicaToKill); + ServerConfiguration killedBookieConfig = killBookie(replicaToKill); + + killAllBookies(lh, null); + // Starte RW1 + BookieId newBkAddr1 = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr1); + ReplicationWorker rw1 = new ReplicationWorker(baseConf); + + // Starte RW2 + BookieId newBkAddr2 = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr2); + ReplicationWorker rw2 = new ReplicationWorker(baseConf); + rw1.start(); + rw2.start(); + + try { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + int counter = 10; + while (counter-- > 0) { + assertTrue("Expecting that replication should not complete", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)); + Thread.sleep(100); + } + // restart killed bookie + startAndAddBookie(killedBookieConfig); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + } finally { + rw1.shutdown(); + rw2.shutdown(); + } + } + + /** + * Tests that Replication worker should clean the leadger under replication + * node of the ledger already deleted. 
+ */ + @Test + public void testRWShouldCleanTheLedgerFromUnderReplicationIfLedgerAlreadyDeleted() + throws Exception { + LedgerHandle lh = bkc.createLedger(2, 2, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + ReplicationWorker rw = new ReplicationWorker(baseConf); + rw.start(); + + try { + bkc.deleteLedger(lh.getId()); // Deleting the ledger + // Also mark ledger as in UnderReplication + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + } finally { + rw.shutdown(); + } + + } + + @Test + public void testMultipleLedgerReplicationWithReplicationWorker() + throws Exception { + // Ledger1 + LedgerHandle lh1 = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh1.addEntry(data); + } + BookieId replicaToKillFromFirstLedger = lh1.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKillFromFirstLedger); + + // Ledger2 + LedgerHandle lh2 = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh2.addEntry(data); + } + BookieId replicaToKillFromSecondLedger = lh2.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKillFromSecondLedger); + + // Kill ledger1 + killBookie(replicaToKillFromFirstLedger); + lh1.close(); + // Kill ledger2 + killBookie(replicaToKillFromFirstLedger); + lh2.close(); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", 
newBkAddr); + + ReplicationWorker rw = new ReplicationWorker(baseConf); + + rw.start(); + try { + + // Mark ledger1 and 2 as underreplicated + underReplicationManager.markLedgerUnderreplicated(lh1.getId(), + replicaToKillFromFirstLedger.toString()); + underReplicationManager.markLedgerUnderreplicated(lh2.getId(), + replicaToKillFromSecondLedger.toString()); + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh1 + .getId(), basePath)) { + Thread.sleep(100); + } + + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh2 + .getId(), basePath)) { + Thread.sleep(100); + } + + killAllBookies(lh1, newBkAddr); + + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh1, 0, 9); + verifyRecoveredLedgers(lh2, 0, 9); + } finally { + rw.shutdown(); + } + + } + + /** + * Tests that ReplicationWorker should fence the ledger and release ledger + * lock after timeout. Then replication should happen normally. + */ + @Test + public void testRWShouldReplicateTheLedgersAfterTimeoutIfLastFragmentIsUR() + throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + + // set to 3s instead of default 30s + baseConf.setOpenLedgerRereplicationGracePeriod("3000"); + ReplicationWorker rw = new ReplicationWorker(baseConf); + + @Cleanup MetadataClientDriver clientDriver = MetadataDrivers.getClientDriver( + URI.create(baseClientConf.getMetadataServiceUri())); + clientDriver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = clientDriver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager 
underReplicationManager = mFactory + .newLedgerUnderreplicationManager(); + rw.start(); + try { + + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + killAllBookies(lh, newBkAddr); + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + lh = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, TESTPASSWD); + assertFalse("Ledger must have been closed by RW", ClientUtil + .isLedgerOpen(lh)); + } finally { + rw.shutdown(); + underReplicationManager.close(); + } + + } + + @Test + public void testBookiesNotAvailableScenarioForReplicationWorker() throws Exception { + int ensembleSize = 3; + LedgerHandle lh = bkc.createLedger(ensembleSize, ensembleSize, BookKeeper.DigestType.CRC32, TESTPASSWD); + + int numOfEntries = 7; + for (int i = 0; i < numOfEntries; i++) { + lh.addEntry(data); + } + lh.close(); + + BookieId[] bookiesKilled = new BookieId[ensembleSize]; + ServerConfiguration[] killedBookiesConfig = new ServerConfiguration[ensembleSize]; + + // kill all bookies + for (int i = 0; i < ensembleSize; i++) { + bookiesKilled[i] = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(i); + killedBookiesConfig[i] = getBkConf(bookiesKilled[i]); + LOG.info("Killing Bookie : {}", bookiesKilled[i]); + killBookie(bookiesKilled[i]); + } + + // start new bookiesToKill number of bookies + for (int i = 0; i < ensembleSize; i++) { + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + } + + // create couple of replicationworkers + ServerConfiguration newRWConf = new ServerConfiguration(baseConf); + newRWConf.setLockReleaseOfFailedLedgerGracePeriod("64"); + ReplicationWorker rw1 = new ReplicationWorker(newRWConf); + ReplicationWorker rw2 = new ReplicationWorker(newRWConf); + + @Cleanup + MetadataClientDriver clientDriver = MetadataDrivers + 
.getClientDriver(URI.create(baseClientConf.getMetadataServiceUri())); + clientDriver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = clientDriver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + try { + //mark ledger underreplicated + for (int i = 0; i < bookiesKilled.length; i++) { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), bookiesKilled[i].toString()); + } + while (!ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) { + Thread.sleep(100); + } + rw1.start(); + rw2.start(); + + AtomicBoolean isBookieRestarted = new AtomicBoolean(false); + + (new Thread(new Runnable() { + @Override + public void run() { + try { + Thread.sleep(3000); + isBookieRestarted.set(true); + /* + * after sleeping for 3000 msecs, restart one of the + * bookie, so that replication can succeed. + */ + startBookie(killedBookiesConfig[0]); + } catch (Exception e) { + e.printStackTrace(); + } + } + })).start(); + + int rw1PrevFailedAttemptsCount = 0; + int rw2PrevFailedAttemptsCount = 0; + while (!isBookieRestarted.get()) { + /* + * since all the bookies containing the ledger entries are down + * replication wouldnt have succeeded. + */ + assertTrue("Ledger: " + lh.getId() + " should be underreplicated", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)); + + // the number of failed attempts should have increased. 
+ int rw1CurFailedAttemptsCount = rw1.replicationFailedLedgers.get(lh.getId()).get(); + assertTrue( + "The current number of failed attempts: " + rw1CurFailedAttemptsCount + + " should be greater than or equal to previous value: " + rw1PrevFailedAttemptsCount, + rw1CurFailedAttemptsCount >= rw1PrevFailedAttemptsCount); + rw1PrevFailedAttemptsCount = rw1CurFailedAttemptsCount; + + int rw2CurFailedAttemptsCount = rw2.replicationFailedLedgers.get(lh.getId()).get(); + assertTrue( + "The current number of failed attempts: " + rw2CurFailedAttemptsCount + + " should be greater than or equal to previous value: " + rw2PrevFailedAttemptsCount, + rw2CurFailedAttemptsCount >= rw2PrevFailedAttemptsCount); + rw2PrevFailedAttemptsCount = rw2CurFailedAttemptsCount; + + Thread.sleep(50); + } + + /** + * since one of the killed bookie is restarted, replicationworker + * should succeed in replicating this under replicated ledger and it + * shouldn't be under replicated anymore. + */ + int timeToWaitForReplicationToComplete = 20000; + int timeWaited = 0; + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) { + Thread.sleep(100); + timeWaited += 100; + if (timeWaited == timeToWaitForReplicationToComplete) { + fail("Ledger should be replicated by now"); + } + } + + rw1PrevFailedAttemptsCount = rw1.replicationFailedLedgers.get(lh.getId()).get(); + rw2PrevFailedAttemptsCount = rw2.replicationFailedLedgers.get(lh.getId()).get(); + Thread.sleep(2000); + // now since the ledger is replicated, number of failed attempts + // counter shouldn't be increased even after sleeping for sometime. 
+ assertEquals("rw1 failedattempts", rw1PrevFailedAttemptsCount, + rw1.replicationFailedLedgers.get(lh.getId()).get()); + assertEquals("rw2 failed attempts ", rw2PrevFailedAttemptsCount, + rw2.replicationFailedLedgers.get(lh.getId()).get()); + + /* + * Since these entries are eventually available, and replication has + * eventually succeeded, in one of the RW + * unableToReadEntriesForReplication should be 0. + */ + int rw1UnableToReadEntriesForReplication = rw1.unableToReadEntriesForReplication.get(lh.getId()).size(); + int rw2UnableToReadEntriesForReplication = rw2.unableToReadEntriesForReplication.get(lh.getId()).size(); + assertTrue( + "unableToReadEntriesForReplication in RW1: " + rw1UnableToReadEntriesForReplication + + " in RW2: " + + rw2UnableToReadEntriesForReplication, + (rw1UnableToReadEntriesForReplication == 0) + || (rw2UnableToReadEntriesForReplication == 0)); + } finally { + rw1.shutdown(); + rw2.shutdown(); + underReplicationManager.close(); + } + } + + class InjectedReplicationWorker extends ReplicationWorker { + CopyOnWriteArrayList delayReplicationPeriods; + + public InjectedReplicationWorker(ServerConfiguration conf, StatsLogger statsLogger, + CopyOnWriteArrayList delayReplicationPeriods) + throws CompatibilityException, ReplicationException.UnavailableException, + InterruptedException, IOException { + super(conf, statsLogger); + this.delayReplicationPeriods = delayReplicationPeriods; + } + + @Override + protected void scheduleTaskWithDelay(TimerTask timerTask, long delayPeriod) { + delayReplicationPeriods.add(delayPeriod); + super.scheduleTaskWithDelay(timerTask, delayPeriod); + } + } + + @Test + public void testDeferLedgerLockReleaseForReplicationWorker() throws Exception { + int ensembleSize = 3; + LedgerHandle lh = bkc.createLedger(ensembleSize, ensembleSize, BookKeeper.DigestType.CRC32, TESTPASSWD); + int numOfEntries = 7; + for (int i = 0; i < numOfEntries; i++) { + lh.addEntry(data); + } + lh.close(); + + BookieId[] bookiesKilled = new 
BookieId[ensembleSize]; + ServerConfiguration[] killedBookiesConfig = new ServerConfiguration[ensembleSize]; + + // kill all bookies + for (int i = 0; i < ensembleSize; i++) { + bookiesKilled[i] = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(i); + killedBookiesConfig[i] = getBkConf(bookiesKilled[i]); + LOG.info("Killing Bookie : {}", bookiesKilled[i]); + killBookie(bookiesKilled[i]); + } + + // start new bookiesToKill number of bookies + for (int i = 0; i < ensembleSize; i++) { + startNewBookieAndReturnBookieId(); + } + + // create couple of replicationworkers + long lockReleaseOfFailedLedgerGracePeriod = 64L; + long baseBackoffForLockReleaseOfFailedLedger = lockReleaseOfFailedLedgerGracePeriod + / (int) Math.pow(2, ReplicationWorker.NUM_OF_EXPONENTIAL_BACKOFF_RETRIALS); + ServerConfiguration newRWConf = new ServerConfiguration(baseConf); + newRWConf.setLockReleaseOfFailedLedgerGracePeriod(Long.toString(lockReleaseOfFailedLedgerGracePeriod)); + newRWConf.setRereplicationEntryBatchSize(1000); + CopyOnWriteArrayList rw1DelayReplicationPeriods = new CopyOnWriteArrayList(); + CopyOnWriteArrayList rw2DelayReplicationPeriods = new CopyOnWriteArrayList(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger1 = statsProvider.getStatsLogger("rw1"); + TestStatsLogger statsLogger2 = statsProvider.getStatsLogger("rw2"); + ReplicationWorker rw1 = new InjectedReplicationWorker(newRWConf, statsLogger1, rw1DelayReplicationPeriods); + ReplicationWorker rw2 = new InjectedReplicationWorker(newRWConf, statsLogger2, rw2DelayReplicationPeriods); + + Counter numEntriesUnableToReadForReplication1 = statsLogger1 + .getCounter(NUM_ENTRIES_UNABLE_TO_READ_FOR_REPLICATION); + Counter numEntriesUnableToReadForReplication2 = statsLogger2 + .getCounter(NUM_ENTRIES_UNABLE_TO_READ_FOR_REPLICATION); + @Cleanup + MetadataClientDriver clientDriver = MetadataDrivers + .getClientDriver(URI.create(baseClientConf.getMetadataServiceUri())); + 
clientDriver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = clientDriver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory.newLedgerUnderreplicationManager(); + try { + // mark ledger underreplicated + for (int i = 0; i < bookiesKilled.length; i++) { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), bookiesKilled[i].toString()); + } + while (!ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) { + Thread.sleep(100); + } + rw1.start(); + rw2.start(); + + // wait for RWs to complete 'numOfAttemptsToWaitFor' failed attempts + int numOfAttemptsToWaitFor = 10; + while ((rw1.replicationFailedLedgers.get(lh.getId()).get() < numOfAttemptsToWaitFor) + || rw2.replicationFailedLedgers.get(lh.getId()).get() < numOfAttemptsToWaitFor) { + Thread.sleep(500); + } + + /* + * since all the bookies containing the ledger entries are down + * replication wouldn't have succeeded. + */ + assertTrue("Ledger: " + lh.getId() + " should be underreplicated", + ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)); + + /* + * since RW failed 'numOfAttemptsToWaitFor' number of times, we + * should have atleast (numOfAttemptsToWaitFor - 1) + * delayReplicationPeriods and their value should be + * (lockReleaseOfFailedLedgerGracePeriod/16) , 2 * previous value,.. 
+ * with max : lockReleaseOfFailedLedgerGracePeriod + */ + for (int i = 0; i < ((numOfAttemptsToWaitFor - 1)); i++) { + long expectedDelayValue = Math.min(lockReleaseOfFailedLedgerGracePeriod, + baseBackoffForLockReleaseOfFailedLedger * (1 << i)); + assertEquals("RW1 delayperiod", (Long) expectedDelayValue, rw1DelayReplicationPeriods.get(i)); + assertEquals("RW2 delayperiod", (Long) expectedDelayValue, rw2DelayReplicationPeriods.get(i)); + } + + /* + * RW wont try to replicate until and unless RW succeed in reading + * those failed entries before proceeding with replication of under + * replicated fragment, so the numEntriesUnableToReadForReplication + * should be just 'numOfEntries', though RW failed to replicate + * multiple times. + */ + assertEquals("numEntriesUnableToReadForReplication for RW1", Long.valueOf((long) numOfEntries), + numEntriesUnableToReadForReplication1.get()); + assertEquals("numEntriesUnableToReadForReplication for RW2", Long.valueOf((long) numOfEntries), + numEntriesUnableToReadForReplication2.get()); + + /* + * Since these entries are unavailable, + * unableToReadEntriesForReplication should be of size numOfEntries. + */ + assertEquals("RW1 unabletoreadentries", numOfEntries, + rw1.unableToReadEntriesForReplication.get(lh.getId()).size()); + assertEquals("RW2 unabletoreadentries", numOfEntries, + rw2.unableToReadEntriesForReplication.get(lh.getId()).size()); + } finally { + rw1.shutdown(); + rw2.shutdown(); + underReplicationManager.close(); + } + } + + /** + * Tests that ReplicationWorker should not have identified for postponing + * the replication if ledger is in open state and lastFragment is not in + * underReplication state. Note that RW should not fence such ledgers. 
+ */ + @Test + public void testRWShouldReplicateTheLedgersAfterTimeoutIfLastFragmentIsNotUR() + throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, BookKeeper.DigestType.CRC32, + TESTPASSWD); + + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + BookieId replicaToKill = lh.getLedgerMetadata().getAllEnsembles().get(0L).get(0); + + LOG.info("Killing Bookie : {}", replicaToKill); + killBookie(replicaToKill); + + BookieId newBkAddr = startNewBookieAndReturnBookieId(); + LOG.info("New Bookie addr : {}", newBkAddr); + + // Reform ensemble...Making sure that last fragment is not in + // under-replication + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + + ReplicationWorker rw = new ReplicationWorker(baseConf); + + baseClientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri()); + + @Cleanup MetadataClientDriver driver = MetadataDrivers.getClientDriver( + URI.create(baseClientConf.getMetadataServiceUri())); + driver.initialize(baseClientConf, scheduler, NullStatsLogger.INSTANCE, Optional.empty()); + + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + + LedgerUnderreplicationManager underReplicationManager = mFactory + .newLedgerUnderreplicationManager(); + + rw.start(); + try { + + underReplicationManager.markLedgerUnderreplicated(lh.getId(), + replicaToKill.toString()); + while (ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh + .getId(), basePath)) { + Thread.sleep(100); + } + + killAllBookies(lh, newBkAddr); + + // Should be able to read the entries from 0-9 + verifyRecoveredLedgers(lh, 0, 9); + lh = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, TESTPASSWD); + + // Ledger should be still in open state + assertTrue("Ledger must have been closed by RW", ClientUtil + .isLedgerOpen(lh)); + } finally { + rw.shutdown(); + underReplicationManager.close(); + } + + } + + /** + * Test that the replication worker will not shutdown on a simple ZK disconnection. 
+ */ + @Test + public void testRWZKConnectionLost() throws Exception { + try (ZooKeeperClient zk = ZooKeeperClient.newBuilder() + .connectString(zkUtil.getZooKeeperConnectString()) + .sessionTimeoutMs(10000) + .build()) { + + ReplicationWorker rw = new ReplicationWorker(baseConf); + rw.start(); + for (int i = 0; i < 10; i++) { + if (rw.isRunning()) { + break; + } + Thread.sleep(1000); + } + assertTrue("Replication worker should be running", rw.isRunning()); + + stopZKCluster(); + // ZK is down for shorter period than reconnect timeout + Thread.sleep(1000); + startZKCluster(); + + assertTrue("Replication worker should not shutdown", rw.isRunning()); + } + } + + /** + * Test that the replication worker shuts down on non-recoverable ZK connection loss. + */ + @Test + public void testRWZKConnectionLostOnNonRecoverableZkError() throws Exception { + for (int j = 0; j < 3; j++) { + LedgerHandle lh = bkc.createLedger(1, 1, 1, + BookKeeper.DigestType.CRC32, TESTPASSWD, + null); + final long createdLedgerId = lh.getId(); + for (int i = 0; i < 10; i++) { + lh.addEntry(data); + } + lh.close(); + } + + killBookie(2); + killBookie(1); + startNewBookie(); + startNewBookie(); + + servers.get(0).getConfiguration().setRwRereplicateBackoffMs(100); + servers.get(0).startAutoRecovery(); + + Auditor auditor = getAuditor(10, TimeUnit.SECONDS); + ReplicationWorker rw = servers.get(0).getReplicationWorker(); + + ZkLedgerUnderreplicationManager ledgerUnderreplicationManager = + (ZkLedgerUnderreplicationManager) FieldUtils.readField(auditor, + "ledgerUnderreplicationManager", true); + + ZooKeeper zkc = (ZooKeeper) FieldUtils.readField(ledgerUnderreplicationManager, "zkc", true); + auditor.submitAuditTask().get(); + + assertTrue(zkc.getState().isConnected()); + zkc.close(); + assertFalse(zkc.getState().isConnected()); + + auditor.submitAuditTask(); + rw.run(); + + for (int i = 0; i < 10; i++) { + if (!rw.isRunning() && !auditor.isRunning()) { + break; + } + Thread.sleep(1000); + } + 
assertFalse("Replication worker should NOT be running", rw.isRunning()); + assertFalse("Auditor should NOT be running", auditor.isRunning()); + } + + private void killAllBookies(LedgerHandle lh, BookieId excludeBK) + throws Exception { + // Killing all bookies except newly replicated bookie + for (Entry> entry : + lh.getLedgerMetadata().getAllEnsembles().entrySet()) { + List bookies = entry.getValue(); + for (BookieId bookie : bookies) { + if (bookie.equals(excludeBK)) { + continue; + } + killBookie(bookie); + } + } + } + + private void verifyRecoveredLedgers(LedgerHandle lh, long startEntryId, + long endEntryId) throws BKException, InterruptedException { + LedgerHandle lhs = bkc.openLedgerNoRecovery(lh.getId(), + BookKeeper.DigestType.CRC32, TESTPASSWD); + Enumeration entries = lhs.readEntries(startEntryId, + endEntryId); + assertTrue("Should have the elements", entries.hasMoreElements()); + while (entries.hasMoreElements()) { + LedgerEntry entry = entries.nextElement(); + assertEquals("TestReplicationWorker", new String(entry.getEntry())); + } + } + + @Test + public void testReplicateEmptyOpenStateLedger() throws Exception { + LedgerHandle lh = bkc.createLedger(3, 3, 2, BookKeeper.DigestType.CRC32, TESTPASSWD); + assertFalse(lh.getLedgerMetadata().isClosed()); + + List firstEnsemble = lh.getLedgerMetadata().getAllEnsembles().firstEntry().getValue(); + List ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next().getValue(); + killBookie(ensemble.get(1)); + + startNewBookie(); + baseConf.setOpenLedgerRereplicationGracePeriod(String.valueOf(30)); + ReplicationWorker replicationWorker = new ReplicationWorker(baseConf); + replicationWorker.start(); + + try { + underReplicationManager.markLedgerUnderreplicated(lh.getId(), ensemble.get(1).toString()); + Awaitility.waitAtMost(60, TimeUnit.SECONDS).untilAsserted(() -> + assertFalse(ReplicationTestUtil.isLedgerInUnderReplication(zkc, lh.getId(), basePath)) + ); + + LedgerHandle lh1 = 
bkc.openLedgerNoRecovery(lh.getId(), BookKeeper.DigestType.CRC32, TESTPASSWD); + assertTrue(lh1.getLedgerMetadata().isClosed()); + } finally { + replicationWorker.shutdown(); + } + } + + @Test + public void testRepairedNotAdheringPlacementPolicyLedgerFragmentsOnRack() throws Exception { + testRepairedNotAdheringPlacementPolicyLedgerFragments(RackawareEnsemblePlacementPolicy.class, null); + } + + @Test + public void testReplicationStats() throws Exception { + BiConsumer checkReplicationStats = (first, rw) -> { + try { + final Method rereplicate = rw.getClass().getDeclaredMethod("rereplicate"); + rereplicate.setAccessible(true); + final Object result = rereplicate.invoke(rw); + final Field statsLoggerField = rw.getClass().getDeclaredField("statsLogger"); + statsLoggerField.setAccessible(true); + final TestStatsLogger statsLogger = (TestStatsLogger) statsLoggerField.get(rw); + + final Counter numDeferLedgerLockReleaseOfFailedLedgerCounter = + statsLogger.getCounter(ReplicationStats.NUM_DEFER_LEDGER_LOCK_RELEASE_OF_FAILED_LEDGER); + final Counter numLedgersReplicatedCounter = + statsLogger.getCounter(ReplicationStats.NUM_FULL_OR_PARTIAL_LEDGERS_REPLICATED); + final Counter numNotAdheringPlacementLedgersCounter = statsLogger + .getCounter(ReplicationStats.NUM_NOT_ADHERING_PLACEMENT_LEDGERS_REPLICATED); + + assertEquals("NUM_DEFER_LEDGER_LOCK_RELEASE_OF_FAILED_LEDGER", + 1, numDeferLedgerLockReleaseOfFailedLedgerCounter.get().longValue()); + + if (first) { + assertFalse((boolean) result); + assertEquals("NUM_FULL_OR_PARTIAL_LEDGERS_REPLICATED", + 0, numLedgersReplicatedCounter.get().longValue()); + assertEquals("NUM_NOT_ADHERING_PLACEMENT_LEDGERS_REPLICATED", + 0, numNotAdheringPlacementLedgersCounter.get().longValue()); + + } else { + assertTrue((boolean) result); + assertEquals("NUM_FULL_OR_PARTIAL_LEDGERS_REPLICATED", + 1, numLedgersReplicatedCounter.get().longValue()); + assertEquals("NUM_NOT_ADHERING_PLACEMENT_LEDGERS_REPLICATED", + 1, 
numNotAdheringPlacementLedgersCounter.get().longValue()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + testRepairedNotAdheringPlacementPolicyLedgerFragments( + RackawareEnsemblePlacementPolicy.class, checkReplicationStats); + } + + private void testRepairedNotAdheringPlacementPolicyLedgerFragments( + Class placementPolicyClass, + BiConsumer checkReplicationStats) throws Exception { + List firstThreeBookies = servers.stream().map(ele -> { + try { + return ele.getServer().getBookieId(); + } catch (UnknownHostException e) { + return null; + } + }).filter(Objects::nonNull).collect(Collectors.toList()); + + baseClientConf.setProperty("reppDnsResolverClass", StaticDNSResolver.class.getName()); + baseClientConf.setProperty("enforceStrictZoneawarePlacement", false); + bkc.close(); + bkc = new BookKeeperTestClient(baseClientConf) { + @Override + protected EnsemblePlacementPolicy initializeEnsemblePlacementPolicy(ClientConfiguration conf, + DNSToSwitchMapping dnsResolver, + HashedWheelTimer timer, + FeatureProvider featureProvider, + StatsLogger statsLogger, + BookieAddressResolver bookieAddressResolver) + throws IOException { + EnsemblePlacementPolicy ensemblePlacementPolicy = null; + if (ZoneawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildZoneAwareEnsemblePlacementPolicy(firstThreeBookies); + } else if (RackawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildRackAwareEnsemblePlacementPolicy(firstThreeBookies); + } + ensemblePlacementPolicy.initialize(conf, Optional.ofNullable(dnsResolver), timer, + featureProvider, statsLogger, bookieAddressResolver); + return ensemblePlacementPolicy; + } + }; + + //This ledger not adhering placement policy, the combine(0,1,2) rack is 1. 
+ LedgerHandle lh = bkc.createLedger(3, 3, 3, BookKeeper.DigestType.CRC32, TESTPASSWD); + + int entrySize = 10; + for (int i = 0; i < entrySize; i++) { + lh.addEntry(data); + } + lh.close(); + + int minNumRacksPerWriteQuorumConfValue = 2; + + ServerConfiguration servConf = new ServerConfiguration(confByIndex(0)); + servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue); + servConf.setProperty("reppDnsResolverClass", StaticDNSResolver.class.getName()); + servConf.setAuditorPeriodicPlacementPolicyCheckInterval(1000); + servConf.setRepairedPlacementPolicyNotAdheringBookieEnable(true); + + MutableObject auditorRef = new MutableObject(); + try { + TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef); + Gauge ledgersNotAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", + 1, ledgersNotAdheringToPlacementPolicyGuage.getSample()); + Gauge ledgersSoftlyAdheringToPlacementPolicyGuage = statsLogger + .getGauge(ReplicationStats.NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY); + assertEquals("NUM_LEDGERS_SOFTLY_ADHERING_TO_PLACEMENT_POLICY guage value", + 0, ledgersSoftlyAdheringToPlacementPolicyGuage.getSample()); + } finally { + Auditor auditor = auditorRef.getValue(); + if (auditor != null) { + auditor.close(); + } + } + + ZooKeeper zk = getZk((PulsarMetadataClientDriver) bkc.getMetadataClientDriver()); + + + Stat stat = zk.exists("/ledgers/underreplication/ledgers/0000/0000/0000/0000/urL0000000000", false); + assertNotNull(stat); + + baseConf.setRepairedPlacementPolicyNotAdheringBookieEnable(true); + BookKeeper bookKeeper = new BookKeeperTestClient(baseClientConf) { + @Override + protected EnsemblePlacementPolicy initializeEnsemblePlacementPolicy(ClientConfiguration conf, + DNSToSwitchMapping dnsResolver, + HashedWheelTimer timer, + FeatureProvider featureProvider, + 
StatsLogger statsLogger, + BookieAddressResolver bookieAddressResolver) + throws IOException { + EnsemblePlacementPolicy ensemblePlacementPolicy = null; + if (ZoneawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildZoneAwareEnsemblePlacementPolicy(firstThreeBookies); + } else if (RackawareEnsemblePlacementPolicy.class == placementPolicyClass) { + ensemblePlacementPolicy = buildRackAwareEnsemblePlacementPolicy(firstThreeBookies); + } + ensemblePlacementPolicy.initialize(conf, Optional.ofNullable(dnsResolver), timer, + featureProvider, statsLogger, bookieAddressResolver); + return ensemblePlacementPolicy; + } + }; + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(REPLICATION_SCOPE); + ReplicationWorker rw = new ReplicationWorker(baseConf, bookKeeper, false, statsLogger); + + if (checkReplicationStats != null) { + checkReplicationStats.accept(true, rw); + } else { + rw.start(); + } + + //start new bookie, the rack is /rack2 + BookieId newBookieId = startNewBookieAndReturnBookieId(); + + if (checkReplicationStats != null) { + checkReplicationStats.accept(false, rw); + } + + Awaitility.await().untilAsserted(() -> { + LedgerMetadata metadata = bkc.getLedgerManager().readLedgerMetadata(lh.getId()).get().getValue(); + List newBookies = metadata.getAllEnsembles().get(0L); + assertTrue(newBookies.contains(newBookieId)); + }); + + Awaitility.await().untilAsserted(() -> { + Stat stat1 = zk.exists("/ledgers/underreplication/ledgers/0000/0000/0000/0000/urL0000000000", false); + assertNull(stat1); + }); + + for (BookieId rack1Book : firstThreeBookies) { + killBookie(rack1Book); + } + + verifyRecoveredLedgers(lh, 0, entrySize - 1); + + if (checkReplicationStats == null) { + rw.shutdown(); + } + baseConf.setRepairedPlacementPolicyNotAdheringBookieEnable(false); + bookKeeper.close(); + } + + private EnsemblePlacementPolicy buildRackAwareEnsemblePlacementPolicy(List 
bookieIds) { + return new RackawareEnsemblePlacementPolicy() { + @Override + public String resolveNetworkLocation(BookieId addr) { + if (bookieIds.contains(addr)) { + return "/rack1"; + } + //The other bookie is /rack2 + return "/rack2"; + } + }; + } + + private EnsemblePlacementPolicy buildZoneAwareEnsemblePlacementPolicy(List firstThreeBookies) { + return new ZoneawareEnsemblePlacementPolicy() { + @Override + protected String resolveNetworkLocation(BookieId addr) { + //The first three bookie 1 is /zone1/ud1 + //The first three bookie 2,3 is /zone1/ud2 + if (firstThreeBookies.get(0).equals(addr)) { + return "/zone1/ud1"; + } else if (firstThreeBookies.contains(addr)) { + return "/zone1/ud2"; + } + //The other bookie is /zone2/ud1 + return "/zone2/ud1"; + } + }; + } + + private TestStatsLogger startAuditorAndWaitForPlacementPolicyCheck(ServerConfiguration servConf, + MutableObject auditorRef) + throws MetadataException, CompatibilityException, KeeperException, + InterruptedException, ReplicationException.UnavailableException, UnknownHostException { + LedgerManagerFactory mFactory = driver.getLedgerManagerFactory(); + LedgerUnderreplicationManager urm = mFactory.newLedgerUnderreplicationManager(); + TestStatsProvider statsProvider = new TestStatsProvider(); + TestStatsLogger statsLogger = statsProvider.getStatsLogger(AUDITOR_SCOPE); + TestStatsProvider.TestOpStatsLogger placementPolicyCheckStatsLogger = + (TestStatsProvider.TestOpStatsLogger) statsLogger + .getOpStatsLogger(ReplicationStats.PLACEMENT_POLICY_CHECK_TIME); + + final AuditorPeriodicCheckTest.TestAuditor auditor = new AuditorPeriodicCheckTest.TestAuditor( + BookieImpl.getBookieId(servConf).toString(), servConf, bkc, false, statsLogger, null); + auditorRef.setValue(auditor); + CountDownLatch latch = auditor.getLatch(); + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 0, + placementPolicyCheckStatsLogger.getSuccessCount()); + urm.setPlacementPolicyCheckCTime(-1); + auditor.start(); + /* + * since 
placementPolicyCheckCTime is set to -1, placementPolicyCheck should be + * scheduled to run with no initialdelay + */ + assertTrue("placementPolicyCheck should have executed", latch.await(20, TimeUnit.SECONDS)); + for (int i = 0; i < 20; i++) { + Thread.sleep(100); + if (placementPolicyCheckStatsLogger.getSuccessCount() >= 1) { + break; + } + } + assertEquals("PLACEMENT_POLICY_CHECK_TIME SuccessCount", 1, + placementPolicyCheckStatsLogger.getSuccessCount()); + return statsLogger; + } + + private ZooKeeper getZk(PulsarMetadataClientDriver pulsarMetadataClientDriver) throws Exception { + PulsarLedgerManagerFactory pulsarLedgerManagerFactory = + (PulsarLedgerManagerFactory) pulsarMetadataClientDriver.getLedgerManagerFactory(); + Field field = pulsarLedgerManagerFactory.getClass().getDeclaredField("store"); + field.setAccessible(true); + ZKMetadataStore zkMetadataStore = (ZKMetadataStore) field.get(pulsarLedgerManagerFactory); + return zkMetadataStore.getZkClient(); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ZooKeeperUtil.java b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ZooKeeperUtil.java new file mode 100644 index 0000000000000..5113edb72c49a --- /dev/null +++ b/pulsar-metadata/src/test/java/org/apache/bookkeeper/replication/ZooKeeperUtil.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * This file is derived from ZooKeeperUtil from Apache BookKeeper + * http://bookkeeper.apache.org + */ + +package org.apache.bookkeeper.replication; + +import static org.testng.Assert.assertTrue; +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.apache.bookkeeper.test.ZooKeeperCluster; +import org.apache.bookkeeper.util.IOUtils; +import org.apache.bookkeeper.util.ZkUtils; +import org.apache.bookkeeper.zookeeper.ZooKeeperClient; +import org.apache.commons.io.FileUtils; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.server.NIOServerCnxnFactory; +import org.apache.zookeeper.server.ZooKeeperServer; +import org.apache.zookeeper.test.ClientBase; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test the zookeeper utilities. 
+ */ +public class ZooKeeperUtil implements ZooKeeperCluster { + + static { + // org.apache.zookeeper.test.ClientBase uses FourLetterWordMain, from 3.5.3 four letter words + // are disabled by default due to security reasons + System.setProperty("zookeeper.4lw.commands.whitelist", "*"); + } + static final Logger LOG = LoggerFactory.getLogger(ZooKeeperUtil.class); + + // ZooKeeper related variables + protected Integer zooKeeperPort = 0; + private InetSocketAddress zkaddr; + + protected ZooKeeperServer zks; + protected ZooKeeper zkc; // zookeeper client + protected NIOServerCnxnFactory serverFactory; + protected File zkTmpDir; + private String connectString; + private String ledgersRootPath; + + public ZooKeeperUtil(String ledgersRootPath) { + this.ledgersRootPath = ledgersRootPath; + String loopbackIPAddr = InetAddress.getLoopbackAddress().getHostAddress(); + zkaddr = new InetSocketAddress(loopbackIPAddr, 0); + connectString = loopbackIPAddr + ":" + zooKeeperPort; + } + + @Override + public ZooKeeper getZooKeeperClient() { + return zkc; + } + + @Override + public String getZooKeeperConnectString() { + return connectString; + } + + @Override + public String getMetadataServiceUri() { + return getMetadataServiceUri("/ledgers"); + } + + @Override + public String getMetadataServiceUri(String zkLedgersRootPath) { + return "zk://" + connectString + zkLedgersRootPath; + } + + @Override + public String getMetadataServiceUri(String zkLedgersRootPath, String type) { + return "zk+" + type + "://" + connectString + zkLedgersRootPath; + } + + @Override + public void startCluster() throws Exception { + // create a ZooKeeper server(dataDir, dataLogDir, port) + LOG.debug("Running ZK server"); + ClientBase.setupTestEnv(); + zkTmpDir = IOUtils.createTempDir("zookeeper", "test"); + + // start the server and client. 
+ restartCluster(); + + // create default bk ensemble + createBKEnsemble(ledgersRootPath); + } + + @Override + public void createBKEnsemble(String ledgersPath) throws KeeperException, InterruptedException { + int last = ledgersPath.lastIndexOf('/'); + if (last > 0) { + String pathToCreate = ledgersPath.substring(0, last); + CompletableFuture future = new CompletableFuture<>(); + if (zkc.exists(pathToCreate, false) == null) { + ZkUtils.asyncCreateFullPathOptimistic(zkc, + pathToCreate, + new byte[0], + ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT, (i, s, o, s1) -> { + future.complete(null); + }, null); + } + future.join(); + } + + ZooKeeperCluster.super.createBKEnsemble(ledgersPath); + } + @Override + public void restartCluster() throws Exception { + zks = new ZooKeeperServer(zkTmpDir, zkTmpDir, + ZooKeeperServer.DEFAULT_TICK_TIME); + serverFactory = new NIOServerCnxnFactory(); + serverFactory.configure(zkaddr, 100); + serverFactory.startup(zks); + + if (0 == zooKeeperPort) { + zooKeeperPort = serverFactory.getLocalPort(); + zkaddr = new InetSocketAddress(zkaddr.getAddress().getHostAddress(), zooKeeperPort); + connectString = zkaddr.getAddress().getHostAddress() + ":" + zooKeeperPort; + } + + boolean b = ClientBase.waitForServerUp(getZooKeeperConnectString(), + ClientBase.CONNECTION_TIMEOUT); + LOG.debug("Server up: " + b); + + // create a zookeeper client + LOG.debug("Instantiate ZK Client"); + zkc = ZooKeeperClient.newBuilder() + .connectString(getZooKeeperConnectString()) + .sessionTimeoutMs(10000) + .build(); + } + + @Override + public void sleepCluster(final int time, + final TimeUnit timeUnit, + final CountDownLatch l) + throws InterruptedException, IOException { + Thread[] allthreads = new Thread[Thread.activeCount()]; + Thread.enumerate(allthreads); + for (final Thread t : allthreads) { + if (t.getName().contains("SyncThread:0")) { + Thread sleeper = new Thread() { + @SuppressWarnings("deprecation") + public void run() { + try { + t.suspend(); + 
l.countDown(); + timeUnit.sleep(time); + t.resume(); + } catch (Exception e) { + LOG.error("Error suspending thread", e); + } + } + }; + sleeper.start(); + return; + } + } + throw new IOException("ZooKeeper thread not found"); + } + + @Override + public void stopCluster() throws Exception { + if (zkc != null) { + zkc.close(); + } + + // shutdown ZK server + if (serverFactory != null) { + serverFactory.shutdown(); + assertTrue(ClientBase.waitForServerDown(getZooKeeperConnectString(), ClientBase.CONNECTION_TIMEOUT), + "waiting for server down"); + } + if (zks != null) { + zks.getTxnLogFactory().close(); + } + } + + @Override + public void killCluster() throws Exception { + stopCluster(); + FileUtils.deleteDirectory(zkTmpDir); + } +} diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java index 726f5ae312d19..33034ddb3fe0f 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java @@ -19,39 +19,33 @@ package org.apache.pulsar.metadata; import static org.testng.Assert.assertTrue; - import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; -import java.net.InetSocketAddress; +import java.lang.reflect.Field; import java.net.Socket; - -import java.nio.charset.StandardCharsets; - +import java.util.Properties; +import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; - import org.apache.commons.io.FileUtils; -import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.server.ContainerManager; -import org.apache.zookeeper.server.NIOServerCnxnFactory; -import org.apache.zookeeper.server.Request; -import org.apache.zookeeper.server.RequestProcessor; import org.apache.zookeeper.server.ServerCnxnFactory; -import org.apache.zookeeper.server.SessionTracker; import 
org.apache.zookeeper.server.ZooKeeperServer; +import org.apache.zookeeper.server.ZooKeeperServerMain; +import org.apache.zookeeper.server.embedded.ExitHandler; +import org.apache.zookeeper.server.embedded.ZooKeeperServerEmbedded; import org.assertj.core.util.Files; @Slf4j public class TestZKServer implements AutoCloseable { + public static final int TICK_TIME = 1000; - protected ZooKeeperServer zks; - private final File zkDataDir; - private ServerCnxnFactory serverFactory; - private ContainerManager containerManager; - private int zkPort = 0; + private final File zkDataDir; + private int zkPort; // initially this is zero + private ZooKeeperServerEmbedded zooKeeperServerEmbedded; public TestZKServer() throws Exception { this.zkDataDir = Files.newTemporaryFolder(); @@ -64,86 +58,86 @@ public TestZKServer() throws Exception { } public void start() throws Exception { - this.zks = new ZooKeeperServer(zkDataDir, zkDataDir, TICK_TIME); - this.zks.setMaxSessionTimeout(300_000); - this.serverFactory = new NIOServerCnxnFactory(); - this.serverFactory.configure(new InetSocketAddress(zkPort), 1000); - this.serverFactory.startup(zks, true); - - this.zkPort = serverFactory.getLocalPort(); - log.info("Started test ZK server on port {}", zkPort); + final Properties configZookeeper = new Properties(); + configZookeeper.put("clientPort", zkPort + ""); + configZookeeper.put("host", "127.0.0.1"); + configZookeeper.put("ticktime", TICK_TIME + ""); + zooKeeperServerEmbedded = ZooKeeperServerEmbedded + .builder() + .baseDir(zkDataDir.toPath()) + .configuration(configZookeeper) + .exitHandler(ExitHandler.LOG_ONLY) + .build(); + + zooKeeperServerEmbedded.start(60_000); + log.info("Started test ZK server on at {}", zooKeeperServerEmbedded.getConnectionString()); + + ZooKeeperServerMain zooKeeperServerMain = getZooKeeperServerMain(zooKeeperServerEmbedded); + ServerCnxnFactory serverCnxnFactory = getServerCnxnFactory(zooKeeperServerMain); + // save the port, in order to allow restarting on 
the same port + zkPort = serverCnxnFactory.getLocalPort(); boolean zkServerReady = waitForServerUp(this.getConnectionString(), 30_000); assertTrue(zkServerReady); + } - this.containerManager = new ContainerManager(zks.getZKDatabase(), new RequestProcessor() { - @Override - public void processRequest(Request request) throws RequestProcessorException { - String path = StandardCharsets.UTF_8.decode(request.request).toString(); - try { - zks.getZKDatabase().getDataTree().deleteNode(path, -1); - } catch (KeeperException.NoNodeException e) { - // Ok - } - } + @SneakyThrows + private static ZooKeeperServerMain getZooKeeperServerMain(ZooKeeperServerEmbedded zooKeeperServerEmbedded) { + ZooKeeperServerMain zooKeeperServerMain = readField(zooKeeperServerEmbedded.getClass(), + "mainsingle", zooKeeperServerEmbedded); + return zooKeeperServerMain; + } - @Override - public void shutdown() { + @SneakyThrows + private static ContainerManager getContainerManager(ZooKeeperServerMain zooKeeperServerMain) { + ContainerManager containerManager = readField(ZooKeeperServerMain.class, "containerManager", zooKeeperServerMain); + return containerManager; + } - } - }, 10, 10000, 0L); + @SneakyThrows + private static ZooKeeperServer getZooKeeperServer(ZooKeeperServerMain zooKeeperServerMain) { + ServerCnxnFactory serverCnxnFactory = getServerCnxnFactory(zooKeeperServerMain); + ZooKeeperServer zkServer = readField(ServerCnxnFactory.class, "zkServer", serverCnxnFactory); + return zkServer; + } + + @SneakyThrows + private static T readField(Class clazz, String field, Object object) { + Field declaredField = clazz.getDeclaredField(field); + boolean accessible = declaredField.isAccessible(); + if (!accessible) { + declaredField.setAccessible(true); + } + try { + return (T) declaredField.get(object); + } finally { + declaredField.setAccessible(accessible); + } + } + + private static ServerCnxnFactory getServerCnxnFactory(ZooKeeperServerMain zooKeeperServerMain) throws Exception { + 
ServerCnxnFactory serverCnxnFactory = readField(ZooKeeperServerMain.class, "cnxnFactory", zooKeeperServerMain); + return serverCnxnFactory; } public void checkContainers() throws Exception { // Make sure the container nodes are actually deleted Thread.sleep(1000); + ContainerManager containerManager = getContainerManager(getZooKeeperServerMain(zooKeeperServerEmbedded)); containerManager.checkContainers(); } public void stop() throws Exception { - if (containerManager != null) { - containerManager.stop(); - containerManager = null; - } - - if (serverFactory != null) { - serverFactory.shutdown(); - serverFactory = null; - } - - if (zks != null) { - SessionTracker sessionTracker = zks.getSessionTracker(); - zks.shutdown(); - zks.getZKDatabase().close(); - if (sessionTracker instanceof Thread) { - Thread sessionTrackerThread = (Thread) sessionTracker; - sessionTrackerThread.interrupt(); - sessionTrackerThread.join(); - } - zks = null; + if (zooKeeperServerEmbedded != null) { + zooKeeperServerEmbedded.close(); } - log.info("Stopped test ZK server"); } public void expireSession(long sessionId) { - zks.expire(new SessionTracker.Session() { - @Override - public long getSessionId() { - return sessionId; - } - - @Override - public int getTimeout() { - return 10_000; - } - - @Override - public boolean isClosing() { - return false; - } - }); + getZooKeeperServer(getZooKeeperServerMain(zooKeeperServerEmbedded)) + .expire(sessionId); } @Override @@ -152,12 +146,9 @@ public void close() throws Exception { FileUtils.deleteDirectory(zkDataDir); } - public int getPort() { - return zkPort; - } - + @SneakyThrows public String getConnectionString() { - return "127.0.0.1:" + getPort(); + return zooKeeperServerEmbedded.getConnectionString(); } public static boolean waitForServerUp(String hp, long timeout) { diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java 
b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java index 0df325b3c57a0..0e9c781fb9143 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/bookkeeper/LedgerUnderreplicationManagerTest.java @@ -23,12 +23,14 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.google.protobuf.TextFormat; +import java.lang.reflect.Field; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -38,6 +40,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import lombok.Cleanup; @@ -54,6 +57,7 @@ import org.apache.bookkeeper.util.BookKeeperConstants; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.metadata.BaseMetadataStoreTest; +import org.apache.pulsar.metadata.api.GetResult; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.NotificationType; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; @@ -296,6 +300,69 @@ public void testMarkingAsReplicated(String provider, Supplier urlSupplie assertEquals(l, lB.get(), "Should be the ledger I marked"); } + + @Test(timeOut = 10000) + public void testZkMetasStoreMarkReplicatedDeleteEmptyParentNodes() throws Exception { + methodSetup(stringSupplier(() -> zks.getConnectionString())); + + String missingReplica = "localhost:3181"; + + @Cleanup + LedgerUnderreplicationManager m1 = 
lmf.newLedgerUnderreplicationManager(); + + Long ledgerA = 0xfeadeefdacL; + m1.markLedgerUnderreplicated(ledgerA, missingReplica); + + Field storeField = m1.getClass().getDeclaredField("store"); + storeField.setAccessible(true); + MetadataStoreExtended metadataStore = (MetadataStoreExtended) storeField.get(m1); + + String fiveLevelPath = PulsarLedgerUnderreplicationManager.getUrLedgerPath(urLedgerPath, ledgerA); + Optional getResult = metadataStore.get(fiveLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String fourLevelPath = fiveLevelPath.substring(0, fiveLevelPath.lastIndexOf("/")); + getResult = metadataStore.get(fourLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String threeLevelPath = fourLevelPath.substring(0, fourLevelPath.lastIndexOf("/")); + getResult = metadataStore.get(threeLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String twoLevelPath = fourLevelPath.substring(0, threeLevelPath.lastIndexOf("/")); + getResult = metadataStore.get(twoLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + String oneLevelPath = fourLevelPath.substring(0, twoLevelPath.lastIndexOf("/")); + getResult = metadataStore.get(oneLevelPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + getResult = metadataStore.get(urLedgerPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + + long ledgerToRereplicate = m1.getLedgerToRereplicate(); + assertEquals(ledgerToRereplicate, ledgerA); + m1.markLedgerReplicated(ledgerA); + + getResult = metadataStore.get(fiveLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(fourLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(threeLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(twoLevelPath).get(1, TimeUnit.SECONDS); + 
assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(oneLevelPath).get(1, TimeUnit.SECONDS); + assertFalse(getResult.isPresent()); + + getResult = metadataStore.get(urLedgerPath).get(1, TimeUnit.SECONDS); + assertTrue(getResult.isPresent()); + } + /** * Test releasing of a ledger * A ledger is released when a client decides it does not want @@ -548,6 +615,8 @@ public void testDisableLedgerReplication(String provider, Supplier urlSu final String missingReplica = "localhost:3181"; // disabling replication + AtomicInteger callbackCount = new AtomicInteger(); + lum.notifyLedgerReplicationEnabled((rc, result) -> callbackCount.incrementAndGet()); lum.disableLedgerReplication(); log.info("Disabled Ledeger Replication"); @@ -565,6 +634,7 @@ public void testDisableLedgerReplication(String provider, Supplier urlSu } catch (TimeoutException te) { // expected behaviour, as the replication is disabled } + assertEquals(callbackCount.get(), 1, "Notify callback times mismatch"); } /** @@ -585,7 +655,8 @@ public void testEnableLedgerReplication(String provider, Supplier urlSup log.debug("Unexpected exception while marking urLedger", e); fail("Unexpected exception while marking urLedger" + e.getMessage()); } - + AtomicInteger callbackCount = new AtomicInteger(); + lum.notifyLedgerReplicationEnabled((rc, result) -> callbackCount.incrementAndGet()); // disabling replication lum.disableLedgerReplication(); log.debug("Disabled Ledeger Replication"); @@ -622,6 +693,7 @@ public void testEnableLedgerReplication(String provider, Supplier urlSup znodeLatch.await(5, TimeUnit.SECONDS); log.debug("Enabled Ledeger Replication"); assertEquals(znodeLatch.getCount(), 0, "Failed to disable ledger replication!"); + assertEquals(callbackCount.get(), 2, "Notify callback times mismatch"); } finally { thread1.interrupt(); } @@ -683,6 +755,17 @@ public void testReplicasCheckCTime(String provider, Supplier urlSupplier assertEquals(underReplicaMgr1.getReplicasCheckCTime(), curTime); } + 
@Test(timeOut = 60000, dataProvider = "impl") + public void testLostBookieRecoveryDelay(String provider, Supplier urlSupplier) throws Exception { + methodSetup(urlSupplier); + + AtomicInteger callbackCount = new AtomicInteger(); + lum.notifyLostBookieRecoveryDelayChanged((rc, result) -> callbackCount.incrementAndGet()); + // disabling replication + lum.setLostBookieRecoveryDelay(10); + Awaitility.await().until(() -> callbackCount.get() == 2); + } + private void verifyMarkLedgerUnderreplicated(Collection missingReplica) throws Exception { Long ledgerA = 0xfeadeefdacL; String znodeA = getUrLedgerZnode(ledgerA); diff --git a/pulsar-metadata/src/test/resources/findbugsExclude.xml b/pulsar-metadata/src/test/resources/findbugsExclude.xml index e40b2a0fd9a12..bf075c39791cf 100644 --- a/pulsar-metadata/src/test/resources/findbugsExclude.xml +++ b/pulsar-metadata/src/test/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - - - - - - - \ No newline at end of file + + + + + + + + + diff --git a/pulsar-package-management/bookkeeper-storage/pom.xml b/pulsar-package-management/bookkeeper-storage/pom.xml index eda8edbf93048..39ea32e0a2307 100644 --- a/pulsar-package-management/bookkeeper-storage/pom.xml +++ b/pulsar-package-management/bookkeeper-storage/pom.xml @@ -1,4 +1,4 @@ - + - - - pulsar-package-management - org.apache.pulsar - 3.1.0-SNAPSHOT - - 4.0.0 - - pulsar-package-bookkeeper-storage - Apache Pulsar :: Package Management :: BookKeeper Storage - - - - ${project.groupId} - pulsar-package-core - ${project.version} - - - - org.apache.distributedlog - distributedlog-core - - - net.jpountz.lz4 - lz4 - - - - - - org.apache.zookeeper - zookeeper - tests - test - - - ch.qos.logback - logback-core - - - ch.qos.logback - logback-classic - - - io.netty - netty-tcnative - - - - - - - io.dropwizard.metrics - metrics-core - test - - - - - org.xerial.snappy - snappy-java - test - - - - ${project.groupId} - managed-ledger - ${project.version} - test - - - - ${project.groupId} - 
testmocks - ${project.version} - - - + + + pulsar-package-management + io.streamnative + 3.1.0-SNAPSHOT + + 4.0.0 + pulsar-package-bookkeeper-storage + Apache Pulsar :: Package Management :: BookKeeper Storage + + + ${project.groupId} + pulsar-package-core + ${project.version} + + + org.apache.distributedlog + distributedlog-core + + + net.jpountz.lz4 + lz4 + + + + + org.apache.zookeeper + zookeeper + tests + test + + + ch.qos.logback + logback-core + + + ch.qos.logback + logback-classic + + + io.netty + netty-tcnative + + + + + org.hamcrest + hamcrest + test + + + + io.dropwizard.metrics + metrics-core + test + + + + org.xerial.snappy + snappy-java + test + + + ${project.groupId} + managed-ledger + ${project.version} + test + + + ${project.groupId} + testmocks + ${project.version} + + diff --git a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java index 40c2041d4e6c4..43db5ad4ba845 100644 --- a/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java +++ b/pulsar-package-management/bookkeeper-storage/src/test/java/org/apache/pulsar/packages/management/storage/bookkeeper/bookkeeper/test/BookKeeperClusterTestCase.java @@ -70,7 +70,6 @@ import org.apache.bookkeeper.meta.LedgerManager; import org.apache.bookkeeper.meta.LedgerManagerFactory; import org.apache.bookkeeper.meta.MetadataBookieDriver; -import org.apache.bookkeeper.meta.zk.ZKMetadataDriverBase; import org.apache.bookkeeper.metastore.InMemoryMetaStore; import org.apache.bookkeeper.net.BookieId; import org.apache.bookkeeper.net.BookieSocketAddress; @@ -486,7 +485,7 @@ public ServerConfiguration killBookie(int index) throws Exception { 
public ServerConfiguration killBookieAndWaitForZK(int index) throws Exception { ServerTester tester = servers.get(index); // IKTODO: this method is awful ServerConfiguration ret = killBookie(index); - while (zkc.exists(ZKMetadataDriverBase.resolveZkLedgersRootPath(baseConf) + "/" + AVAILABLE_NODE + "/" + while (zkc.exists("/ledgers/" + AVAILABLE_NODE + "/" + tester.getServer().getBookieId().toString(), false) != null) { Thread.sleep(500); } diff --git a/pulsar-package-management/core/pom.xml b/pulsar-package-management/core/pom.xml index 605c4c13e61fa..242bb67d156d7 100644 --- a/pulsar-package-management/core/pom.xml +++ b/pulsar-package-management/core/pom.xml @@ -1,4 +1,4 @@ - + - - - pulsar-package-management - org.apache.pulsar - 3.1.0-SNAPSHOT - - 4.0.0 - - pulsar-package-core - Apache Pulsar :: Package Management :: Core - - - - ${project.groupId} - pulsar-client-admin-api - ${project.parent.version} - - - - com.google.guava - guava - - - - org.apache.commons - commons-lang3 - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - - ${pulsar.client.compiler.release} - - - - org.gaul - modernizer-maven-plugin - - true - 17 - - - - modernizer - verify - - modernizer - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - + + + pulsar-package-management + io.streamnative + 3.1.0-SNAPSHOT + + 4.0.0 + pulsar-package-core + Apache Pulsar :: Package Management :: Core + + + ${project.groupId} + pulsar-client-admin-api + ${project.parent.version} + + + com.google.guava + guava + + + org.apache.commons + commons-lang3 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + ${pulsar.client.compiler.release} + + + + org.gaul + modernizer-maven-plugin + + true + 17 + + + + modernizer + verify + + modernizer + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/src/main/resources/findbugsExclude.xml 
+ + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + diff --git a/pulsar-package-management/core/src/main/resources/findbugsExclude.xml b/pulsar-package-management/core/src/main/resources/findbugsExclude.xml index 3a2e998dce984..bd8d2c4f77efa 100644 --- a/pulsar-package-management/core/src/main/resources/findbugsExclude.xml +++ b/pulsar-package-management/core/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - - pulsar-package-management - org.apache.pulsar - 3.1.0-SNAPSHOT - - 4.0.0 - - pulsar-package-filesystem-storage - Apache Pulsar :: Package Management :: Filesystem Storage - - - - ${project.groupId} - pulsar-package-core - ${project.parent.version} - - - - com.google.guava - guava - - - - ${project.groupId} - testmocks - ${project.parent.version} - test - - - + + + pulsar-package-management + io.streamnative + 3.1.0-SNAPSHOT + + 4.0.0 + pulsar-package-filesystem-storage + Apache Pulsar :: Package Management :: Filesystem Storage + + + ${project.groupId} + pulsar-package-core + ${project.parent.version} + + + com.google.guava + guava + + + ${project.groupId} + testmocks + ${project.parent.version} + test + + diff --git a/pulsar-package-management/pom.xml b/pulsar-package-management/pom.xml index 3a61f659d3109..aedc954344f49 100644 --- a/pulsar-package-management/pom.xml +++ b/pulsar-package-management/pom.xml @@ -1,4 +1,4 @@ - + - - - pulsar - org.apache.pulsar - 3.1.0-SNAPSHOT - .. - - 4.0.0 - - pulsar-package-management - pom - Apache Pulsar :: Package Management - - core - bookkeeper-storage - filesystem-storage - - - - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - - spotbugs - verify - - check - - - - - - + + + pulsar + io.streamnative + 3.1.0-SNAPSHOT + .. 
+ + 4.0.0 + pulsar-package-management + pom + Apache Pulsar :: Package Management + + core + bookkeeper-storage + filesystem-storage + + + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + + spotbugs + verify + + check + + + + + + diff --git a/pulsar-proxy/pom.xml b/pulsar-proxy/pom.xml index de1eb9a16c05b..ae6f93e7c64f8 100644 --- a/pulsar-proxy/pom.xml +++ b/pulsar-proxy/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-proxy Pulsar Proxy - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} pulsar-broker-common ${project.version} - ${project.groupId} pulsar-common ${project.version} - ${project.groupId} pulsar-docs-tools ${project.version} - ${project.groupId} pulsar-websocket ${project.version} - org.apache.commons commons-lang3 - io.swagger swagger-annotations provided - org.eclipse.jetty jetty-server - org.eclipse.jetty jetty-alpn-conscrypt-server - org.eclipse.jetty jetty-servlet - org.eclipse.jetty jetty-servlets - org.eclipse.jetty jetty-proxy - org.glassfish.jersey.core jersey-server - org.glassfish.jersey.containers jersey-container-servlet-core - org.glassfish.jersey.media jersey-media-json-jackson - com.google.guava guava - com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - org.glassfish.jersey.inject jersey-hk2 - javax.xml.bind jaxb-api - com.sun.activation javax.activation - io.prometheus simpleclient - io.prometheus simpleclient_hotspot - io.prometheus simpleclient_servlet - io.prometheus simpleclient_jetty - ${project.groupId} pulsar-broker ${project.version} test - ${project.groupId} testmocks ${project.version} test - org.awaitility awaitility test - com.beust jcommander - org.apache.logging.log4j log4j-core - com.github.seancfoley ipaddress @@ -295,5 +264,4 @@ - diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java 
b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java index 75059fc0d2551..95a3bf032fe21 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/extensions/ProxyExtensions.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -49,6 +50,9 @@ public class ProxyExtensions implements AutoCloseable { * @return the collection of extensions */ public static ProxyExtensions load(ProxyConfiguration conf) throws IOException { + if (conf.getProxyExtensions().isEmpty()) { + return new ProxyExtensions(Collections.emptyMap()); + } ExtensionsDefinitions definitions = ProxyExtensionsUtils.searchForExtensions( conf.getProxyExtensionsDirectory(), conf.getNarExtractionDirectory()); diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java index d9dda9823ea89..c528ceb2cf5b7 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java @@ -41,8 +41,10 @@ import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.api.AuthenticationDataProvider; import org.apache.pulsar.client.api.AuthenticationFactory; +import org.apache.pulsar.client.api.KeyStoreParams; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.util.SecurityUtility; +import org.apache.pulsar.common.util.keystoretls.KeyStoreSSLContext; import org.apache.pulsar.policies.data.loadbalancer.ServiceLookupData; import org.eclipse.jetty.client.HttpClient; import org.eclipse.jetty.client.HttpRequest; @@ -269,20 +271,35 @@ protected HttpClient 
newHttpClient() { SSLContext sslCtx; AuthenticationDataProvider authData = auth.getAuthData(); - if (authData.hasDataForTls()) { - sslCtx = SecurityUtility.createSslContext( + if (config.isBrokerClientTlsEnabledWithKeyStore()) { + KeyStoreParams params = authData.hasDataForTls() ? authData.getTlsKeyStoreParams() : null; + sslCtx = KeyStoreSSLContext.createClientSslContext( + config.getBrokerClientSslProvider(), + params != null ? params.getKeyStoreType() : null, + params != null ? params.getKeyStorePath() : null, + params != null ? params.getKeyStorePassword() : null, config.isTlsAllowInsecureConnection(), - trustCertificates, - authData.getTlsCertificates(), - authData.getTlsPrivateKey(), - config.getBrokerClientSslProvider() - ); + config.getBrokerClientTlsTrustStoreType(), + config.getBrokerClientTlsTrustStore(), + config.getBrokerClientTlsTrustStorePassword(), + config.getBrokerClientTlsCiphers(), + config.getBrokerClientTlsProtocols()); } else { - sslCtx = SecurityUtility.createSslContext( - config.isTlsAllowInsecureConnection(), - trustCertificates, - config.getBrokerClientSslProvider() - ); + if (authData.hasDataForTls()) { + sslCtx = SecurityUtility.createSslContext( + config.isTlsAllowInsecureConnection(), + trustCertificates, + authData.getTlsCertificates(), + authData.getTlsPrivateKey(), + config.getBrokerClientSslProvider() + ); + } else { + sslCtx = SecurityUtility.createSslContext( + config.isTlsAllowInsecureConnection(), + trustCertificates, + config.getBrokerClientSslProvider() + ); + } } SslContextFactory contextFactory = new SslContextFactory.Client(); diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java index 3ecd670cbbf7a..db2969e3c3920 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java @@ 
-173,35 +173,43 @@ public class ProxyConfiguration implements PulsarConfiguration { @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The service url points to the broker cluster. URL must have the pulsar:// prefix." + doc = "If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the" + + " discovery service provider." + + " URL must have the pulsar:// prefix. And does not support multi url yet." ) private String brokerServiceURL; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The tls service url points to the broker cluster. URL must have the pulsar+ssl:// prefix." + doc = "If does not set metadataStoreUrl or configurationMetadataStoreUrl, this url should point to the" + + " discovery service provider." + + " URL must have the pulsar+ssl:// prefix. And does not support multi url yet." ) private String brokerServiceURLTLS; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The web service url points to the broker cluster" + doc = "The web service url points to the discovery service provider of the broker cluster, and does not support" + + " multi url yet." ) private String brokerWebServiceURL; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The tls web service url points to the broker cluster" + doc = "The tls web service url points to the discovery service provider of the broker cluster, and does not" + + " support multi url yet." ) private String brokerWebServiceURLTLS; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The web service url points to the function worker cluster." + doc = "The web service url points to the discovery service provider of the function worker cluster, and does" + + " not support multi url yet." 
+ " Only configure it when you setup function workers in a separate cluster" ) private String functionWorkerWebServiceURL; @FieldContext( category = CATEGORY_BROKER_DISCOVERY, - doc = "The tls web service url points to the function worker cluster." + doc = "The tls web service url points to the discovery service provider of the function worker cluster, and" + + " does not support multi url yet." + " Only configure it when you setup function workers in a separate cluster" ) private String functionWorkerWebServiceURLTLS; @@ -371,6 +379,12 @@ public class ProxyConfiguration implements PulsarConfiguration { ) private int authenticationRefreshCheckSeconds = 60; + @FieldContext( + category = CATEGORY_HTTP, + doc = "Whether to enable the proxy's /metrics and /proxy-stats http endpoints" + ) + private boolean enableProxyStatsEndpoints = true; + @FieldContext( category = CATEGORY_AUTHENTICATION, doc = "Whether the '/metrics' endpoint requires authentication. Defaults to true." diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java index b4854780d5471..e623d4b85aa09 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java @@ -159,11 +159,28 @@ public ProxyServiceStarter(String[] args) throws Exception { if (isNotBlank(config.getBrokerServiceURL())) { checkArgument(config.getBrokerServiceURL().startsWith("pulsar://"), "brokerServiceURL must start with pulsar://"); + ensureUrlNotContainsComma("brokerServiceURL", config.getBrokerServiceURL()); } - if (isNotBlank(config.getBrokerServiceURLTLS())) { checkArgument(config.getBrokerServiceURLTLS().startsWith("pulsar+ssl://"), "brokerServiceURLTLS must start with pulsar+ssl://"); + ensureUrlNotContainsComma("brokerServiceURLTLS", config.getBrokerServiceURLTLS()); + } + + if 
(isNotBlank(config.getBrokerWebServiceURL())) { + ensureUrlNotContainsComma("brokerWebServiceURL", config.getBrokerWebServiceURL()); + } + if (isNotBlank(config.getBrokerWebServiceURLTLS())) { + ensureUrlNotContainsComma("brokerWebServiceURLTLS", config.getBrokerWebServiceURLTLS()); + } + + if (isNotBlank(config.getFunctionWorkerWebServiceURL())) { + ensureUrlNotContainsComma("functionWorkerWebServiceURLTLS", + config.getFunctionWorkerWebServiceURL()); + } + if (isNotBlank(config.getFunctionWorkerWebServiceURLTLS())) { + ensureUrlNotContainsComma("functionWorkerWebServiceURLTLS", + config.getFunctionWorkerWebServiceURLTLS()); } if ((isBlank(config.getBrokerServiceURL()) && isBlank(config.getBrokerServiceURLTLS())) @@ -184,6 +201,11 @@ public ProxyServiceStarter(String[] args) throws Exception { } } + private void ensureUrlNotContainsComma(String paramName, String paramValue) { + checkArgument(!paramValue.contains(","), paramName + " does not support multi urls yet," + + " it should point to the discovery service provider."); + } + public static void main(String[] args) throws Exception { ProxyServiceStarter serviceStarter = new ProxyServiceStarter(args); try { @@ -253,15 +275,21 @@ public static void addWebServerHandlers(WebServer server, ProxyConfiguration config, ProxyService service, BrokerDiscoveryProvider discoveryProvider) throws Exception { - if (service != null) { - PrometheusMetricsServlet metricsServlet = service.getMetricsServlet(); - if (metricsServlet != null) { - server.addServlet("/metrics", new ServletHolder(metricsServlet), - Collections.emptyList(), config.isAuthenticateMetricsEndpoint()); + // We can make 'status.html' publicly accessible without authentication since + // it does not contain any sensitive data. 
+ server.addRestResource("/", VipStatus.ATTRIBUTE_STATUS_FILE_PATH, config.getStatusFilePath(), + VipStatus.class, false); + if (config.isEnableProxyStatsEndpoints()) { + server.addRestResource("/proxy-stats", ProxyStats.ATTRIBUTE_PULSAR_PROXY_NAME, service, + ProxyStats.class); + if (service != null) { + PrometheusMetricsServlet metricsServlet = service.getMetricsServlet(); + if (metricsServlet != null) { + server.addServlet("/metrics", new ServletHolder(metricsServlet), + Collections.emptyList(), config.isAuthenticateMetricsEndpoint()); + } } } - server.addRestResource("/", VipStatus.ATTRIBUTE_STATUS_FILE_PATH, config.getStatusFilePath(), VipStatus.class); - server.addRestResource("/proxy-stats", ProxyStats.ATTRIBUTE_PULSAR_PROXY_NAME, service, ProxyStats.class); AdminProxyHandler adminProxyHandler = new AdminProxyHandler(config, discoveryProvider); ServletHolder servletHolder = new ServletHolder(adminProxyHandler); diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java index 1ca8dc93ebf9e..b95bbcab08b11 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java @@ -197,12 +197,20 @@ public void addServlet(String basePath, ServletHolder servletHolder, List> attributes, boolean requireAuthentication) { + addServlet(basePath, servletHolder, attributes, requireAuthentication, true); + } + + private void addServlet(String basePath, ServletHolder servletHolder, + List> attributes, boolean requireAuthentication, boolean checkForExistingPaths) { popularServletParams(servletHolder, config); - Optional existingPath = servletPaths.stream().filter(p -> p.startsWith(basePath)).findFirst(); - if (existingPath.isPresent()) { - throw new IllegalArgumentException( - String.format("Cannot add servlet at %s, path %s already exists", basePath, existingPath.get())); + if 
(checkForExistingPaths) { + Optional existingPath = servletPaths.stream().filter(p -> p.startsWith(basePath)).findFirst(); + if (existingPath.isPresent()) { + throw new IllegalArgumentException( + String.format("Cannot add servlet at %s, path %s already exists", basePath, + existingPath.get())); + } } servletPaths.add(basePath); @@ -231,17 +239,40 @@ private static void popularServletParams(ServletHolder servletHolder, ProxyConfi } } + /** + * Add a REST resource to the servlet context with authentication coverage. + * + * @see WebServer#addRestResource(String, String, Object, Class, boolean) + * + * @param basePath The base path for the resource. + * @param attribute An attribute associated with the resource. + * @param attributeValue The value of the attribute. + * @param resourceClass The class representing the resource. + */ public void addRestResource(String basePath, String attribute, Object attributeValue, Class resourceClass) { + addRestResource(basePath, attribute, attributeValue, resourceClass, true); + } + + /** + * Add a REST resource to the servlet context. + * + * @param basePath The base path for the resource. + * @param attribute An attribute associated with the resource. + * @param attributeValue The value of the attribute. + * @param resourceClass The class representing the resource. + * @param requireAuthentication A boolean indicating whether authentication is required for this resource. 
+ */ + public void addRestResource(String basePath, String attribute, Object attributeValue, + Class resourceClass, boolean requireAuthentication) { ResourceConfig config = new ResourceConfig(); config.register(resourceClass); config.register(JsonMapperProvider.class); ServletHolder servletHolder = new ServletHolder(new ServletContainer(config)); servletHolder.setAsyncSupported(true); - ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); - context.setContextPath(basePath); - context.addServlet(servletHolder, MATCH_ALL); - context.setAttribute(attribute, attributeValue); - handlers.add(context); + // This method has not historically checked for existing paths, so we don't check here either. The + // method call is added to reduce code duplication. + addServlet(basePath, servletHolder, Collections.singletonList(Pair.of(attribute, attributeValue)), + requireAuthentication, false); } public int getExternalServicePort() { diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java index afa2e12dabb4f..67fe30db1613f 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/stats/ProxyStats.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.proxy.stats; +import static java.util.concurrent.TimeUnit.SECONDS; import io.netty.channel.Channel; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; @@ -27,7 +28,10 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.Path; @@ -36,19 +40,27 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import 
javax.ws.rs.core.Response.Status; +import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationParameters; +import org.apache.pulsar.broker.web.AuthenticationFilter; import org.apache.pulsar.proxy.server.ProxyService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Path("/") @Api(value = "/proxy-stats", description = "Stats for proxy", tags = "proxy-stats", hidden = true) @Produces(MediaType.APPLICATION_JSON) public class ProxyStats { + private static final Logger log = LoggerFactory.getLogger(ProxyStats.class); public static final String ATTRIBUTE_PULSAR_PROXY_NAME = "pulsar-proxy"; private ProxyService service; @Context protected ServletContext servletContext; + @Context + protected HttpServletRequest httpRequest; @GET @Path("/connections") @@ -56,6 +68,7 @@ public class ProxyStats { response = List.class, responseContainer = "List") @ApiResponses(value = { @ApiResponse(code = 503, message = "Proxy service is not initialized") }) public List metrics() { + throwIfNotSuperUser("metrics"); List stats = new ArrayList<>(); proxyService().getClientCnxs().forEach(cnx -> { if (cnx.getDirectProxyHandler() == null) { @@ -76,7 +89,7 @@ public List metrics() { @ApiResponses(value = { @ApiResponse(code = 412, message = "Proxy logging should be > 2 to capture topic stats"), @ApiResponse(code = 503, message = "Proxy service is not initialized") }) public Map topics() { - + throwIfNotSuperUser("topics"); Optional logLevel = proxyService().getConfiguration().getProxyLogLevel(); if (!logLevel.isPresent() || logLevel.get() < 2) { throw new RestException(Status.PRECONDITION_FAILED, "Proxy doesn't have logging level 2"); @@ -90,6 +103,7 @@ public Map topics() { notes = "It only changes log-level in memory, change it config file to persist the change") @ApiResponses(value = { @ApiResponse(code = 412, message = "Proxy log level can be [0-2]"), }) public void updateProxyLogLevel(@PathParam("logLevel") 
int logLevel) { + throwIfNotSuperUser("updateProxyLogLevel"); if (logLevel < 0 || logLevel > 2) { throw new RestException(Status.PRECONDITION_FAILED, "Proxy log level can be only [0-2]"); } @@ -100,6 +114,7 @@ public void updateProxyLogLevel(@PathParam("logLevel") int logLevel) { @Path("/logging") @ApiOperation(hidden = true, value = "Get proxy logging") public int getProxyLogLevel(@PathParam("logLevel") int logLevel) { + throwIfNotSuperUser("getProxyLogLevel"); return proxyService().getProxyLogLevel(); } @@ -112,4 +127,26 @@ protected ProxyService proxyService() { } return service; } + + private void throwIfNotSuperUser(String action) { + if (proxyService().getConfiguration().isAuthorizationEnabled()) { + AuthenticationParameters authParams = AuthenticationParameters.builder() + .clientRole((String) httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName)) + .clientAuthenticationDataSource((AuthenticationDataSource) + httpRequest.getAttribute(AuthenticationFilter.AuthenticatedDataAttributeName)) + .build(); + try { + if (authParams.getClientRole() == null + || !proxyService().getAuthorizationService().isSuperUser(authParams).get(30, SECONDS)) { + log.error("Client with role [{}] is not authorized to {}", authParams.getClientRole(), action); + throw new org.apache.pulsar.common.util.RestException(Status.UNAUTHORIZED, + "Client is not authorized to perform operation"); + } + } catch (ExecutionException | TimeoutException | InterruptedException e) { + log.warn("Time-out {} sec while checking the role {} is a super user role ", 30, + authParams.getClientRole()); + throw new org.apache.pulsar.common.util.RestException(Status.INTERNAL_SERVER_ERROR, e.getMessage()); + } + } + } } diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AdminProxyHandlerKeystoreTLSTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AdminProxyHandlerKeystoreTLSTest.java new file mode 100644 index 0000000000000..d6796b7eaa6d2 --- 
/dev/null +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AdminProxyHandlerKeystoreTLSTest.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.proxy.server; + +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; +import org.apache.pulsar.broker.authentication.AuthenticationService; +import org.apache.pulsar.broker.resources.PulsarResources; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.impl.auth.AuthenticationKeyStoreTls; +import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.pulsar.policies.data.loadbalancer.LoadManagerReport; +import org.apache.pulsar.policies.data.loadbalancer.LoadReport; +import org.eclipse.jetty.servlet.ServletHolder; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; + +import static 
org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; + +public class AdminProxyHandlerKeystoreTLSTest extends MockedPulsarServiceBaseTest { + + + private final ProxyConfiguration proxyConfig = new ProxyConfiguration(); + + private WebServer webServer; + + private BrokerDiscoveryProvider discoveryProvider; + + private PulsarResources resource; + + @BeforeMethod + @Override + protected void setup() throws Exception { + + conf.setAuthenticationEnabled(true); + conf.setAuthorizationEnabled(false); + conf.setWebServicePortTls(Optional.of(0)); + conf.setBrokerServicePortTls(Optional.of(0)); + conf.setTlsEnabledWithKeyStore(true); + conf.setTlsAllowInsecureConnection(false); + conf.setTlsKeyStoreType(KEYSTORE_TYPE); + conf.setTlsKeyStore(BROKER_KEYSTORE_FILE_PATH); + conf.setTlsKeyStorePassword(BROKER_KEYSTORE_PW); + conf.setTlsTrustStoreType(KEYSTORE_TYPE); + conf.setTlsTrustStore(CLIENT_TRUSTSTORE_FILE_PATH); + conf.setTlsTrustStorePassword(CLIENT_TRUSTSTORE_PW); + + super.internalSetup(); + + proxyConfig.setWebServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); + proxyConfig.setServicePortTls(Optional.of(0)); + proxyConfig.setWebServicePortTls(Optional.of(0)); + proxyConfig.setTlsEnabledWithBroker(true); + proxyConfig.setTlsEnabledWithKeyStore(true); + + proxyConfig.setTlsKeyStoreType(KEYSTORE_TYPE); + proxyConfig.setTlsKeyStore(BROKER_KEYSTORE_FILE_PATH); + proxyConfig.setTlsKeyStorePassword(BROKER_KEYSTORE_PW); + proxyConfig.setTlsTrustStoreType(KEYSTORE_TYPE); + proxyConfig.setTlsTrustStore(CLIENT_TRUSTSTORE_FILE_PATH); + proxyConfig.setTlsTrustStorePassword(CLIENT_TRUSTSTORE_PW); + + proxyConfig.setMetadataStoreUrl(DUMMY_VALUE); + proxyConfig.setConfigurationMetadataStoreUrl(GLOBAL_DUMMY_VALUE); + proxyConfig.setBrokerClientTlsEnabledWithKeyStore(true); + proxyConfig.setBrokerClientTlsKeyStoreType(KEYSTORE_TYPE); + proxyConfig.setBrokerClientTlsKeyStore(BROKER_KEYSTORE_FILE_PATH); + 
proxyConfig.setBrokerClientTlsKeyStorePassword(BROKER_KEYSTORE_PW); + proxyConfig.setBrokerClientTlsTrustStoreType(KEYSTORE_TYPE); + proxyConfig.setBrokerClientTlsTrustStore(BROKER_TRUSTSTORE_FILE_PATH); + proxyConfig.setBrokerClientTlsTrustStorePassword(BROKER_TRUSTSTORE_PW); + Set providers = new HashSet<>(); + providers.add(AuthenticationProviderTls.class.getName()); + proxyConfig.setAuthenticationProviders(providers); + proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationKeyStoreTls.class.getName()); + proxyConfig.setBrokerClientAuthenticationParameters(String.format("keyStoreType:%s,keyStorePath:%s,keyStorePassword:%s", + KEYSTORE_TYPE, BROKER_KEYSTORE_FILE_PATH, BROKER_KEYSTORE_PW)); + + resource = new PulsarResources(new ZKMetadataStore(mockZooKeeper), + new ZKMetadataStore(mockZooKeeperGlobal)); + webServer = new WebServer(proxyConfig, new AuthenticationService( + PulsarConfigurationLoader.convertFrom(proxyConfig))); + discoveryProvider = spy(new BrokerDiscoveryProvider(proxyConfig, resource)); + LoadManagerReport report = new LoadReport(brokerUrl.toString(), brokerUrlTls.toString(), null, null); + doReturn(report).when(discoveryProvider).nextBroker(); + ServletHolder servletHolder = new ServletHolder(new AdminProxyHandler(proxyConfig, discoveryProvider)); + webServer.addServlet("/admin", servletHolder); + webServer.addServlet("/lookup", servletHolder); + webServer.start(); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + webServer.stop(); + super.internalCleanup(); + } + + PulsarAdmin getAdminClient() throws Exception { + return PulsarAdmin.builder() + .serviceHttpUrl("https://localhost:" + webServer.getListenPortHTTPS().get()) + .useKeyStoreTls(true) + .allowTlsInsecureConnection(false) + .tlsTrustStorePath(BROKER_TRUSTSTORE_FILE_PATH) + .tlsTrustStorePassword(BROKER_TRUSTSTORE_PW) + .authentication(AuthenticationKeyStoreTls.class.getName(), + 
String.format("keyStoreType:%s,keyStorePath:%s,keyStorePassword:%s", + KEYSTORE_TYPE, BROKER_KEYSTORE_FILE_PATH, BROKER_KEYSTORE_PW)) + .build(); + } + + @Test + public void testAdmin() throws Exception { + getAdminClient().clusters().createCluster(configClusterName, ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + } + +} diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java index 8229d929ee5e3..9c8e5197adf1a 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java @@ -168,7 +168,7 @@ protected void setup() throws Exception { conf.setBrokerClientAuthenticationPlugin(BasicAuthentication.class.getName()); // Expires after an hour conf.setBrokerClientAuthenticationParameters( - "entityType:broker,expiryTime:" + (System.currentTimeMillis() + 3600 * 1000)); + "entityType:admin,expiryTime:" + (System.currentTimeMillis() + 3600 * 1000)); Set superUserRoles = new HashSet<>(); superUserRoles.add("admin"); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java index 97a73c20b60d0..a9a562e04c899 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConfigurationTest.java @@ -20,6 +20,8 @@ import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.testng.annotations.Test; import java.beans.Introspector; @@ -36,6 +38,8 @@ import java.util.Properties; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import static 
org.testng.Assert.fail; @Test(groups = "broker") public class ProxyConfigurationTest { @@ -134,4 +138,119 @@ public void testConvert() throws IOException { } } + @Test + public void testBrokerUrlCheck() throws IOException { + ProxyConfiguration configuration = new ProxyConfiguration(); + // brokerServiceURL must start with pulsar:// + configuration.setBrokerServiceURL("127.0.0.1:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURL must start with pulsar://"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("brokerServiceURL must start with pulsar://")); + } + } + configuration.setBrokerServiceURL("pulsar://127.0.0.1:6650"); + + // brokerServiceURLTLS must start with pulsar+ssl:// + configuration.setBrokerServiceURLTLS("pulsar://127.0.0.1:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURLTLS must start with pulsar+ssl://"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("brokerServiceURLTLS must start with pulsar+ssl://")); + } + } + + // brokerServiceURL did not support multi urls yet. 
+ configuration.setBrokerServiceURL("pulsar://127.0.0.1:6650,pulsar://127.0.0.2:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURL does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerServiceURL("pulsar://127.0.0.1:6650"); + + // brokerServiceURLTLS did not support multi urls yet. + configuration.setBrokerServiceURLTLS("pulsar+ssl://127.0.0.1:6650,pulsar+ssl:127.0.0.2:6650"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerServiceURLTLS does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerServiceURLTLS("pulsar+ssl://127.0.0.1:6650"); + + // brokerWebServiceURL did not support multi urls yet. + configuration.setBrokerWebServiceURL("http://127.0.0.1:8080,http://127.0.0.2:8080"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerWebServiceURL does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerWebServiceURL("http://127.0.0.1:8080"); + + // brokerWebServiceURLTLS did not support multi urls yet. 
+ configuration.setBrokerWebServiceURLTLS("https://127.0.0.1:443,https://127.0.0.2:443"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("brokerWebServiceURLTLS does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setBrokerWebServiceURLTLS("https://127.0.0.1:443"); + + // functionWorkerWebServiceURL did not support multi urls yet. + configuration.setFunctionWorkerWebServiceURL("http://127.0.0.1:8080,http://127.0.0.2:8080"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("functionWorkerWebServiceURL does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setFunctionWorkerWebServiceURL("http://127.0.0.1:8080"); + + // functionWorkerWebServiceURLTLS did not support multi urls yet. 
+ configuration.setFunctionWorkerWebServiceURLTLS("http://127.0.0.1:443,http://127.0.0.2:443"); + try (MockedStatic theMock = Mockito.mockStatic(PulsarConfigurationLoader.class)) { + theMock.when(PulsarConfigurationLoader.create(Mockito.anyString(), Mockito.any())) + .thenReturn(configuration); + try { + new ProxyServiceStarter(ProxyServiceStarterTest.ARGS); + fail("functionWorkerWebServiceURLTLS does not support multi urls yet"); + } catch (Exception ex) { + assertTrue(ex.getMessage().contains("does not support multi urls yet")); + } + } + configuration.setFunctionWorkerWebServiceURLTLS("http://127.0.0.1:443"); + } + } \ No newline at end of file diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java index 99af3b1cf6abe..b7cfb87474707 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java @@ -53,7 +53,7 @@ protected void setup() throws Exception { conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); conf.setBrokerClientAuthenticationPlugin(BasicAuthentication.class.getName()); - conf.setBrokerClientAuthenticationParameters("authParam:broker"); + conf.setBrokerClientAuthenticationParameters("authParam:admin"); conf.setAuthenticateOriginalAuthData(true); Set superUserRoles = new HashSet(); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java index bde989fc432f9..2f36cc679f1f2 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java @@ -69,6 +69,7 @@ protected void doInitConf() throws Exception { 
conf.setAdvertisedAddress(null); conf.setAuthenticateOriginalAuthData(true); conf.setBrokerServicePort(Optional.of(0)); + conf.setWebServicePortTls(Optional.of(0)); conf.setWebServicePort(Optional.of(0)); Set superUserRoles = new HashSet<>(); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java index 2c8c382b6a5ef..3259cfd95c741 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java @@ -144,7 +144,7 @@ protected void setup() throws Exception { conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); conf.setBrokerClientAuthenticationPlugin(BasicAuthentication.class.getName()); - conf.setBrokerClientAuthenticationParameters("authParam:broker"); + conf.setBrokerClientAuthenticationParameters("authParam:admin"); Set superUserRoles = new HashSet<>(); superUserRoles.add("admin"); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java index def58be6df372..a9bead706a373 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java @@ -45,7 +45,7 @@ public class ProxyServiceStarterTest extends MockedPulsarServiceBaseTest { - static final String[] ARGS = new String[]{"-c", "./src/test/resources/proxy.conf"}; + public static final String[] ARGS = new String[]{"-c", "./src/test/resources/proxy.conf"}; protected ProxyServiceStarter serviceStarter; protected String serviceUrl; diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java 
b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java index 01c06fbf52f4e..6247c2a66e874 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java @@ -75,6 +75,7 @@ protected void setup() throws Exception { protected void doInitConf() throws Exception { super.doInitConf(); + this.conf.setBrokerServicePortTls(Optional.of(0)); this.conf.setTlsCertificateFilePath(PROXY_CERT_FILE_PATH); this.conf.setTlsKeyFilePath(PROXY_KEY_FILE_PATH); } diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java index e8bb128c8c190..2d97a4b06a856 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java @@ -78,6 +78,8 @@ public class ProxyWithAuthorizationNegTest extends ProducerConsumerBase { protected void setup() throws Exception { // enable tls and auth&auth at broker + conf.setTopicLevelPoliciesEnabled(false); + conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java index e912006faa022..88ecfe8a3187b 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java @@ -116,6 +116,7 @@ protected void setup() throws Exception { proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); 
proxyConfig.setBrokerClientAuthenticationParameters(PROXY_TOKEN); proxyConfig.setAuthenticationProviders(providers); + proxyConfig.setStatusFilePath("./src/test/resources/vip_status.html"); AuthenticationService authService = new AuthenticationService(PulsarConfigurationLoader.convertFrom(proxyConfig)); @@ -405,6 +406,29 @@ public void testProxyAuthorizationWithPrefixSubscriptionAuthMode() throws Except log.info("-- Exiting {} test --", methodName); } + @Test + void testGetStatus() throws Exception { + log.info("-- Starting {} test --", methodName); + final PulsarResources resource = new PulsarResources(new ZKMetadataStore(mockZooKeeper), + new ZKMetadataStore(mockZooKeeperGlobal)); + final AuthenticationService authService = new AuthenticationService( + PulsarConfigurationLoader.convertFrom(proxyConfig)); + final WebServer webServer = new WebServer(proxyConfig, authService); + ProxyServiceStarter.addWebServerHandlers(webServer, proxyConfig, proxyService, + new BrokerDiscoveryProvider(proxyConfig, resource)); + webServer.start(); + @Cleanup + final Client client = javax.ws.rs.client.ClientBuilder + .newClient(new ClientConfig().register(LoggingFeature.class)); + try { + final Response r = client.target(webServer.getServiceUri()).path("/status.html").request().get(); + Assert.assertEquals(r.getStatus(), Response.Status.OK.getStatusCode()); + } finally { + webServer.stop(); + } + log.info("-- Exiting {} test --", methodName); + } + @Test void testGetMetrics() throws Exception { log.info("-- Starting {} test --", methodName); diff --git a/pulsar-sql/pom.xml b/pulsar-sql/pom.xml index d027e1a56c578..8340226ae59a2 100644 --- a/pulsar-sql/pom.xml +++ b/pulsar-sql/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - pom - - org.apache.pulsar - pulsar - 3.1.0-SNAPSHOT - - - pulsar-sql - Pulsar SQL :: Parent - - - - 3.14.9 - - 1.17.2 - 213 - - - - - - - com.squareup.okhttp3 - okhttp - ${okhttp3.version} - - - com.squareup.okhttp3 - okhttp-urlconnection - ${okhttp3.version} - - - 
com.squareup.okhttp3 - logging-interceptor - ${okhttp3.version} - - - com.squareup.okio - okio - ${okio.version} - - - - - org.jline - jline-reader - ${jline3.version} - - - org.jline - jline-terminal - ${jline3.version} - - - org.jline - jline-terminal-jna - ${jline3.version} - - - - - org.slf4j - log4j-over-slf4j - ${slf4j.version} - - - org.slf4j - slf4j-jdk14 - ${slf4j.version} - - - - io.airlift - bom - ${airlift.version} - pom - import - - - - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - - - - main - - - disableSqlMainProfile - - !true - - - - presto-pulsar - presto-pulsar-plugin - presto-distribution - - - - pulsar-sql-tests - - presto-pulsar - presto-pulsar-plugin - presto-distribution - - - + 3.14.9 + + 1.17.2 + 213 + + + + + + com.squareup.okhttp3 + okhttp + ${okhttp3.version} + + + com.squareup.okhttp3 + okhttp-urlconnection + ${okhttp3.version} + + + com.squareup.okhttp3 + logging-interceptor + ${okhttp3.version} + + + com.squareup.okio + okio + ${okio.version} + + + + org.jline + jline-reader + ${jline3.version} + + + org.jline + jline-terminal + ${jline3.version} + + + org.jline + jline-terminal-jna + ${jline3.version} + + + + org.slf4j + log4j-over-slf4j + ${slf4j.version} + + + org.slf4j + slf4j-jdk14 + ${slf4j.version} + + + io.airlift + bom + ${airlift.version} + pom + import + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + + + + main + + + disableSqlMainProfile + + !true + + + + presto-pulsar + presto-pulsar-plugin + presto-distribution + + + + pulsar-sql-tests + + presto-pulsar + presto-pulsar-plugin + presto-distribution + + + - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - - + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + 
diff --git a/pulsar-sql/presto-distribution/LICENSE b/pulsar-sql/presto-distribution/LICENSE index 9afd6c98bc2ff..32fcf0437ffac 100644 --- a/pulsar-sql/presto-distribution/LICENSE +++ b/pulsar-sql/presto-distribution/LICENSE @@ -228,24 +228,24 @@ The Apache Software License, Version 2.0 - guice-5.1.0.jar * Apache Commons - commons-math3-3.6.1.jar - - commons-compress-1.21.jar + - commons-compress-1.26.0.jar - commons-lang3-3.11.jar * Netty - - netty-buffer-4.1.94.Final.jar - - netty-codec-4.1.94.Final.jar - - netty-codec-dns-4.1.94.Final.jar - - netty-codec-http-4.1.94.Final.jar - - netty-codec-haproxy-4.1.94.Final.jar - - netty-codec-socks-4.1.94.Final.jar - - netty-handler-proxy-4.1.94.Final.jar - - netty-common-4.1.94.Final.jar - - netty-handler-4.1.94.Final.jar + - netty-buffer-4.1.100.Final.jar + - netty-codec-4.1.100.Final.jar + - netty-codec-dns-4.1.100.Final.jar + - netty-codec-http-4.1.100.Final.jar + - netty-codec-haproxy-4.1.100.Final.jar + - netty-codec-socks-4.1.100.Final.jar + - netty-handler-proxy-4.1.100.Final.jar + - netty-common-4.1.100.Final.jar + - netty-handler-4.1.100.Final.jar - netty-reactive-streams-2.0.6.jar - - netty-resolver-4.1.94.Final.jar - - netty-resolver-dns-4.1.94.Final.jar - - netty-resolver-dns-classes-macos-4.1.94.Final.jar - - netty-resolver-dns-native-macos-4.1.94.Final-osx-aarch_64.jar - - netty-resolver-dns-native-macos-4.1.94.Final-osx-x86_64.jar + - netty-resolver-4.1.100.Final.jar + - netty-resolver-dns-4.1.100.Final.jar + - netty-resolver-dns-classes-macos-4.1.100.Final.jar + - netty-resolver-dns-native-macos-4.1.100.Final-osx-aarch_64.jar + - netty-resolver-dns-native-macos-4.1.100.Final-osx-x86_64.jar - netty-tcnative-boringssl-static-2.0.61.Final.jar - netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar @@ -253,24 +253,24 @@ The Apache Software License, Version 2.0 - netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar - 
netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar - netty-tcnative-classes-2.0.61.Final.jar - - netty-transport-4.1.94.Final.jar - - netty-transport-classes-epoll-4.1.94.Final.jar - - netty-transport-native-epoll-4.1.94.Final-linux-x86_64.jar - - netty-transport-native-unix-common-4.1.94.Final.jar - - netty-transport-native-unix-common-4.1.94.Final-linux-x86_64.jar - - netty-codec-http2-4.1.94.Final.jar + - netty-transport-4.1.100.Final.jar + - netty-transport-classes-epoll-4.1.100.Final.jar + - netty-transport-native-epoll-4.1.100.Final-linux-x86_64.jar + - netty-transport-native-unix-common-4.1.100.Final.jar + - netty-transport-native-unix-common-4.1.100.Final-linux-x86_64.jar + - netty-codec-http2-4.1.100.Final.jar - netty-incubator-transport-classes-io_uring-0.0.21.Final.jar - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar * GRPC - - grpc-api-1.45.1.jar - - grpc-context-1.45.1.jar - - grpc-core-1.45.1.jar - - grpc-grpclb-1.45.1.jar - - grpc-netty-1.45.1.jar - - grpc-protobuf-1.45.1.jar - - grpc-protobuf-lite-1.45.1.jar - - grpc-stub-1.45.1.jar + - grpc-api-1.55.3.jar + - grpc-context-1.55.3.jar + - grpc-core-1.55.3.jar + - grpc-grpclb-1.55.3.jar + - grpc-netty-1.55.3.jar + - grpc-protobuf-1.55.3.jar + - grpc-protobuf-lite-1.55.3.jar + - grpc-stub-1.55.3.jar * JEtcd - jetcd-api-0.7.5.jar - jetcd-common-0.7.5.jar @@ -283,24 +283,24 @@ The Apache Software License, Version 2.0 - joda-time-2.10.10.jar - failsafe-2.4.4.jar * Jetty - - http2-client-9.4.51.v20230217.jar - - http2-common-9.4.51.v20230217.jar - - http2-hpack-9.4.51.v20230217.jar - - http2-http-client-transport-9.4.51.v20230217.jar - - jetty-alpn-client-9.4.51.v20230217.jar - - http2-server-9.4.51.v20230217.jar - - jetty-alpn-java-client-9.4.51.v20230217.jar - - jetty-client-9.4.51.v20230217.jar - - jetty-http-9.4.51.v20230217.jar - - jetty-io-9.4.51.v20230217.jar - - 
jetty-jmx-9.4.51.v20230217.jar - - jetty-security-9.4.51.v20230217.jar - - jetty-server-9.4.51.v20230217.jar - - jetty-servlet-9.4.51.v20230217.jar - - jetty-util-9.4.51.v20230217.jar - - jetty-util-ajax-9.4.51.v20230217.jar + - http2-client-9.4.53.v20231009.jar + - http2-common-9.4.53.v20231009.jar + - http2-hpack-9.4.53.v20231009.jar + - http2-http-client-transport-9.4.53.v20231009.jar + - jetty-alpn-client-9.4.53.v20231009.jar + - http2-server-9.4.53.v20231009.jar + - jetty-alpn-java-client-9.4.53.v20231009.jar + - jetty-client-9.4.53.v20231009.jar + - jetty-http-9.4.53.v20231009.jar + - jetty-io-9.4.53.v20231009.jar + - jetty-jmx-9.4.53.v20231009.jar + - jetty-security-9.4.53.v20231009.jar + - jetty-server-9.4.53.v20231009.jar + - jetty-servlet-9.4.53.v20231009.jar + - jetty-util-9.4.53.v20231009.jar + - jetty-util-ajax-9.4.53.v20231009.jar * Byte Buddy - - byte-buddy-1.11.13.jar + - byte-buddy-1.14.12.jar * Apache BVal - bval-jsr-2.0.5.jar * Bytecode @@ -372,8 +372,8 @@ The Apache Software License, Version 2.0 * OpenCSV - opencsv-2.3.jar * Avro - - avro-1.10.2.jar - - avro-protobuf-1.10.2.jar + - avro-1.11.3.jar + - avro-protobuf-1.11.3.jar * Caffeine - caffeine-2.9.1.jar * Javax @@ -430,21 +430,21 @@ The Apache Software License, Version 2.0 - async-http-client-2.12.1.jar - async-http-client-netty-utils-2.12.1.jar * Apache Bookkeeper - - bookkeeper-common-4.16.2.jar - - bookkeeper-common-allocator-4.16.2.jar - - bookkeeper-proto-4.16.2.jar - - bookkeeper-server-4.16.2.jar - - bookkeeper-stats-api-4.16.2.jar - - bookkeeper-tools-framework-4.16.2.jar - - circe-checksum-4.16.2.jar - - codahale-metrics-provider-4.16.2.jar - - cpu-affinity-4.16.2.jar - - http-server-4.16.2.jar - - prometheus-metrics-provider-4.16.2.jar - - codahale-metrics-provider-4.16.2.jar - - bookkeeper-slogger-api-4.16.2.jar - - bookkeeper-slogger-slf4j-4.16.2.jar - - native-io-4.16.2.jar + - bookkeeper-common-4.16.4.jar + - bookkeeper-common-allocator-4.16.4.jar + - 
bookkeeper-proto-4.16.4.jar + - bookkeeper-server-4.16.4.jar + - bookkeeper-stats-api-4.16.4.jar + - bookkeeper-tools-framework-4.16.4.jar + - circe-checksum-4.16.4.jar + - codahale-metrics-provider-4.16.4.jar + - cpu-affinity-4.16.4.jar + - http-server-4.16.4.jar + - prometheus-metrics-provider-4.16.4.jar + - codahale-metrics-provider-4.16.4.jar + - bookkeeper-slogger-api-4.16.4.jar + - bookkeeper-slogger-slf4j-4.16.4.jar + - native-io-4.16.4.jar * Apache Commons - commons-cli-1.5.0.jar - commons-codec-1.15.jar @@ -458,7 +458,7 @@ The Apache Software License, Version 2.0 * JSON Simple - json-simple-1.1.1.jar * Snappy - - snappy-java-1.1.10.1.jar + - snappy-java-1.1.10.5.jar * Jackson - jackson-module-parameter-names-2.14.2.jar * Java Assist @@ -472,14 +472,12 @@ The Apache Software License, Version 2.0 - memory-0.8.3.jar - sketches-core-0.8.3.jar * Apache Zookeeper - - zookeeper-3.8.1.jar - - zookeeper-jute-3.8.1.jar + - zookeeper-3.9.1.jar + - zookeeper-jute-3.9.1.jar * Apache Yetus Audience Annotations - audience-annotations-0.12.0.jar * Perfmark - - perfmark-api-0.19.0.jar - * RabbitMQ Java Client - - amqp-client-5.5.3.jar + - perfmark-api-0.26.0.jar * Stream Lib - stream-2.9.5.jar * High Performance Primitive Collections for Java @@ -490,7 +488,7 @@ Protocol Buffers License * Protocol Buffers - protobuf-java-3.19.6.jar - protobuf-java-util-3.19.6.jar - - proto-google-common-protos-2.0.1.jar + - proto-google-common-protos-2.9.0.jar BSD 3-clause "New" or "Revised" License * RE2J TD -- re2j-td-1.4.jar diff --git a/pulsar-sql/presto-distribution/pom.xml b/pulsar-sql/presto-distribution/pom.xml index 1ac764a3c64d9..dc1e24ee490d1 100644 --- a/pulsar-sql/presto-distribution/pom.xml +++ b/pulsar-sql/presto-distribution/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar-sql 3.1.0-SNAPSHOT - pulsar-presto-distribution Pulsar SQL :: Pulsar Presto Distribution - false 2.34 @@ -42,7 +39,6 @@ 2.5.1 4.0.1 - org.glassfish.jersey.core @@ -74,7 
+70,6 @@ jersey-client ${jersey.version} - io.trino trino-server-main @@ -103,13 +98,11 @@ - io.trino trino-cli ${trino.version} - io.airlift launcher @@ -117,7 +110,6 @@ tar.gz bin - io.airlift launcher @@ -125,13 +117,11 @@ tar.gz properties - org.objenesis objenesis ${objenesis.version} - com.twitter.common objectsize @@ -143,56 +133,44 @@ - - com.fasterxml.jackson.core jackson-core - com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.core jackson-annotations - com.fasterxml.jackson.datatype jackson-datatype-joda - com.fasterxml.jackson.dataformat jackson-dataformat-yaml - com.fasterxml.jackson.datatype jackson-datatype-guava - com.fasterxml.jackson.datatype jackson-datatype-jdk8 - com.fasterxml.jackson.datatype jackson-datatype-jsr310 - com.fasterxml.jackson.dataformat jackson-dataformat-smile - - @@ -205,7 +183,6 @@ netty 3.10.6.Final - org.apache.maven maven-core @@ -295,7 +272,6 @@ - @@ -305,7 +281,6 @@ ${skipBuildDistribution} - org.apache.maven.plugins maven-assembly-plugin @@ -329,7 +304,6 @@ - com.mycila license-maven-plugin @@ -354,7 +328,6 @@ - skipBuildDistributionDisabled diff --git a/pulsar-sql/presto-distribution/src/assembly/assembly.xml b/pulsar-sql/presto-distribution/src/assembly/assembly.xml index 96c0421c71515..8439d60ba339e 100644 --- a/pulsar-sql/presto-distribution/src/assembly/assembly.xml +++ b/pulsar-sql/presto-distribution/src/assembly/assembly.xml @@ -1,3 +1,4 @@ + - - bin - - tar.gz - dir - - false - - - ${basedir}/LICENSE - LICENSE - . 
- 644 - - - ${basedir}/src/main/resources/launcher.properties - launcher.properties - bin/ - 644 - - - - - ${basedir}/../presto-pulsar-plugin/target/pulsar-presto-connector/ - plugin/ - - - ${basedir}/src/main/resources/conf/ - conf/ - - - - - lib/ - true - runtime - - io.airlift:launcher:tar.gz:bin:${airlift.version} - io.airlift:launcher:tar.gz:properties:${airlift.version} - *:tar.gz - - - - - - io.airlift:launcher:tar.gz:bin:${airlift.version} - - true - 755 - - - - - io.airlift:launcher:tar.gz:properties:${airlift.version} - - true - - - \ No newline at end of file + + bin + + tar.gz + dir + + false + + + ${basedir}/LICENSE + LICENSE + . + 644 + + + ${basedir}/src/main/resources/launcher.properties + launcher.properties + bin/ + 644 + + + + + ${basedir}/../presto-pulsar-plugin/target/pulsar-presto-connector/ + plugin/ + + + ${basedir}/src/main/resources/conf/ + conf/ + + + + + lib/ + true + runtime + + io.airlift:launcher:tar.gz:bin:${airlift.version} + io.airlift:launcher:tar.gz:properties:${airlift.version} + *:tar.gz + + + + + + io.airlift:launcher:tar.gz:bin:${airlift.version} + + true + 755 + + + + + io.airlift:launcher:tar.gz:properties:${airlift.version} + + true + + + diff --git a/pulsar-sql/presto-pulsar-plugin/pom.xml b/pulsar-sql/presto-pulsar-plugin/pom.xml index 5e88a24bba530..3b8616ed318cd 100644 --- a/pulsar-sql/presto-pulsar-plugin/pom.xml +++ b/pulsar-sql/presto-pulsar-plugin/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - - org.apache.pulsar - pulsar-sql - 3.1.0-SNAPSHOT - - - pulsar-presto-connector - Pulsar SQL :: Pulsar Presto Connector - - - - - ${project.groupId} - pulsar-presto-connector-original - ${project.version} - - - - ${project.groupId} - bouncy-castle-bc - ${project.version} - pkg - true - - - - - - - org.apache.maven.plugins - maven-assembly-plugin - 3.3.0 - - false - true - posix - - src/assembly/assembly.xml - - - - - package - package - - single - - - - - - - + + 4.0.0 + + io.streamnative + pulsar-sql + 3.1.0-SNAPSHOT + + 
pulsar-presto-connector + Pulsar SQL :: Pulsar Presto Connector + + + ${project.groupId} + pulsar-presto-connector-original + ${project.version} + + + ${project.groupId} + bouncy-castle-bc + ${project.version} + pkg + true + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.3.0 + + false + true + posix + + src/assembly/assembly.xml + + + + + package + package + + single + + + + + + org.apache.maven.plugins + maven-jar-plugin + 3.2.0 + + + default-jar + package + + jar + + + + javadoc-jar + package + + jar + + + javadoc + + + + + + diff --git a/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml b/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml index 6650abfda3fc3..649baf7318003 100644 --- a/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml +++ b/pulsar-sql/presto-pulsar-plugin/src/assembly/assembly.xml @@ -1,3 +1,4 @@ + - - bin - - tar.gz - dir - - - - / - false - runtime - - jakarta.ws.rs:jakarta.ws.rs-api - - - - \ No newline at end of file + + bin + + tar.gz + dir + + + + / + false + runtime + + jakarta.ws.rs:jakarta.ws.rs-api + + + + diff --git a/pulsar-sql/presto-pulsar/pom.xml b/pulsar-sql/presto-pulsar/pom.xml index 3ff073f62e459..235445ade9db6 100644 --- a/pulsar-sql/presto-pulsar/pom.xml +++ b/pulsar-sql/presto-pulsar/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - - org.apache.pulsar - pulsar-sql - 3.1.0-SNAPSHOT - - - pulsar-presto-connector-original - Pulsar SQL - Pulsar Presto Connector - Pulsar SQL :: Pulsar Presto Connector Packaging - - - 2.1.2 - 1.8.4 - - - - - io.airlift - bootstrap - - - org.apache.logging.log4j - log4j-to-slf4j - - - - - io.airlift - json - - - - org.apache.avro - avro - ${avro.version} - - - - ${project.groupId} - pulsar-client-admin-original - ${project.version} - - - - ${project.groupId} - managed-ledger - ${project.version} - - - - org.jctools - jctools-core - ${jctools.version} - - - - com.dslplatform - dsl-json - ${dslJson.verson} - - - - io.trino - trino-plugin-toolkit - ${trino.version} - - - 
- - io.trino - trino-spi - ${trino.version} - provided - - - - joda-time - joda-time - ${joda.version} - - - - io.trino - trino-record-decoder - ${trino.version} - - - - javax.annotation - javax.annotation-api - ${javax.annotation-api.version} - - - - io.jsonwebtoken - jjwt-impl - ${jsonwebtoken.version} - test - - - - io.trino - trino-main - ${trino.version} - test - - - - io.trino - trino-testing - ${trino.version} - test - - - - org.apache.pulsar + + 4.0.0 + + io.streamnative + pulsar-sql + 3.1.0-SNAPSHOT + + pulsar-presto-connector-original + Pulsar SQL - Pulsar Presto Connector + Pulsar SQL :: Pulsar Presto Connector Packaging + + 2.1.2 + 1.8.4 + + + + io.airlift + bootstrap + + + org.apache.logging.log4j + log4j-to-slf4j + + + + + io.airlift + json + + + org.apache.avro + avro + ${avro.version} + + + ${project.groupId} + pulsar-client-admin-original + ${project.version} + + + ${project.groupId} + managed-ledger + ${project.version} + + + org.jctools + jctools-core + ${jctools.version} + + + com.dslplatform + dsl-json + ${dslJson.verson} + + + io.trino + trino-plugin-toolkit + ${trino.version} + + + + io.trino + trino-spi + ${trino.version} + provided + + + joda-time + joda-time + ${joda.version} + + + io.trino + trino-record-decoder + ${trino.version} + + + javax.annotation + javax.annotation-api + ${javax.annotation-api.version} + + + io.jsonwebtoken + jjwt-impl + ${jsonwebtoken.version} + test + + + io.trino + trino-main + ${trino.version} + test + + + io.trino + trino-testing + ${trino.version} + test + + + io.streamnative + pulsar-broker + ${project.version} + test + + + io.streamnative + testmocks + ${project.version} + test + + + org.eclipse.jetty + jetty-http + ${jetty.version} + test + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + ${shadePluginPhase} + + shade + + + true + true + + + io.streamnative:pulsar-client-original + io.streamnative:pulsar-client-admin-original + io.streamnative:managed-ledger + io.streamnative:pulsar-metadata + 
org.glassfish.jersey*:* + javax.ws.rs:* + javax.annotation:* + org.glassfish.hk2*:* + org.eclipse.jetty:* + + + + + io.streamnative:pulsar-client-original + + ** + + + + org/bouncycastle/** + + + + + + org.glassfish + org.apache.pulsar.shade.org.glassfish + + + javax.ws + org.apache.pulsar.shade.javax.ws + + + javax.annotation + org.apache.pulsar.shade.javax.annotation + + + jersey + org.apache.pulsar.shade.jersey + + + org.eclipse.jetty + org.apache.pulsar.shade.org.eclipse.jetty + + + + + + + + + + + + + + + + test-jar-dependencies + + + maven.test.skip + !true + + + + + ${project.groupId} pulsar-broker ${project.version} + test-jar test - - - org.apache.pulsar - testmocks - ${project.version} - test - - - - org.eclipse.jetty - jetty-http - ${jetty.version} - test - - - - - - - - org.apache.maven.plugins - maven-shade-plugin - - - ${shadePluginPhase} - - shade - - - true - true - - - - org.apache.pulsar:pulsar-client-original - org.apache.pulsar:pulsar-client-admin-original - org.apache.pulsar:managed-ledger - org.apache.pulsar:pulsar-metadata - - org.glassfish.jersey*:* - javax.ws.rs:* - javax.annotation:* - org.glassfish.hk2*:* - - org.eclipse.jetty:* - - - - - - org.apache.pulsar:pulsar-client-original - - ** - - - - org/bouncycastle/** - - - - - - org.glassfish - org.apache.pulsar.shade.org.glassfish - - - javax.ws - org.apache.pulsar.shade.javax.ws - - - javax.annotation - org.apache.pulsar.shade.javax.annotation - - - jersey - org.apache.pulsar.shade.jersey - - - org.eclipse.jetty - org.apache.pulsar.shade.org.eclipse.jetty - - - - - - - - - - - - - - - - - - test-jar-dependencies - - - maven.test.skip - !true - - - - - ${project.groupId} - pulsar-broker - ${project.version} - test-jar - test - - - - + + + diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java index 858624b156dbb..6663b349761a3 100644 --- 
a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java @@ -776,7 +776,7 @@ private void initEntryCacheSizeAllocator(PulsarConnectorConfig connectorConfig) connectorConfig.getMaxSplitQueueSizeBytes() / 2); this.messageQueueCacheSizeAllocator = new NoStrictCacheSizeAllocator( connectorConfig.getMaxSplitQueueSizeBytes() / 2); - log.info("Init cacheSizeAllocator with maxSplitEntryQueueSizeBytes {}.", + log.info("Init cacheSizeAllocator with maxSplitEntryQueueSizeBytes %d.", connectorConfig.getMaxSplitQueueSizeBytes()); } else { this.entryQueueCacheSizeAllocator = new NullCacheSizeAllocator(); diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java index e2d030d2d7f1b..673ea2b3940d9 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java @@ -74,7 +74,7 @@ public CompletableFuture getSchemaByVersion(byte[] schemaVersion) { } return cache.get(BytesSchemaVersion.of(schemaVersion)); } catch (ExecutionException e) { - LOG.error("Can't get generic schema for topic {} schema version {}", + LOG.error("Can't get generic schema for topic %s schema version %s", topicName.toString(), new String(schemaVersion, StandardCharsets.UTF_8), e); return FutureUtil.failedFuture(e.getCause()); } diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java index 73081f8948a51..1672d5f144817 100644 --- 
a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java @@ -54,6 +54,7 @@ import io.trino.spi.type.Timestamps; import io.trino.spi.type.TinyintType; import io.trino.spi.type.Type; +import io.trino.spi.type.UuidType; import io.trino.spi.type.VarbinaryType; import io.trino.spi.type.VarcharType; import java.math.BigInteger; @@ -61,6 +62,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import org.apache.avro.generic.GenericEnumSymbol; import org.apache.avro.generic.GenericFixed; import org.apache.avro.generic.GenericRecord; @@ -87,7 +89,8 @@ public class PulsarAvroColumnDecoder { TimestampType.TIMESTAMP_MILLIS, DateType.DATE, TimeType.TIME_MILLIS, - VarbinaryType.VARBINARY); + VarbinaryType.VARBINARY, + UuidType.UUID); private final Type columnType; private final String columnMapping; @@ -255,6 +258,10 @@ private static Slice getSlice(Object value, Type type, String columnName) { } } + if (type instanceof UuidType) { + return UuidType.javaUuidToTrinoUuid(UUID.fromString(value.toString())); + } + throw new TrinoException(DECODER_CONVERSION_NOT_SUPPORTED, format("cannot decode object of '%s' as '%s' for column '%s'", value.getClass(), type, columnName)); diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java index 3072bf9441b2c..e6eb6b7f2f947 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java @@ -44,6 +44,7 @@ import io.trino.spi.type.TypeManager; import 
io.trino.spi.type.TypeSignature; import io.trino.spi.type.TypeSignatureParameter; +import io.trino.spi.type.UuidType; import io.trino.spi.type.VarbinaryType; import io.trino.spi.type.VarcharType; import java.util.List; @@ -121,6 +122,10 @@ private Type parseAvroPrestoType(String fieldName, Schema schema) { LogicalType logicalType = schema.getLogicalType(); switch (type) { case STRING: + if (logicalType != null && logicalType.equals(LogicalTypes.uuid())) { + return UuidType.UUID; + } + return createUnboundedVarcharType(); case ENUM: return createUnboundedVarcharType(); case NULL: diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java index 905e3bd6becb4..8e744e3b1229c 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java @@ -58,6 +58,7 @@ import io.trino.spi.type.Timestamps; import io.trino.spi.type.TinyintType; import io.trino.spi.type.Type; +import io.trino.spi.type.UuidType; import io.trino.spi.type.VarbinaryType; import io.trino.spi.type.VarcharType; import java.util.Iterator; @@ -126,7 +127,8 @@ private boolean isSupportedType(Type type) { TimestampType.TIMESTAMP_MILLIS, DateType.DATE, TimeType.TIME_MILLIS, - RealType.REAL + RealType.REAL, + UuidType.UUID ).contains(type)) { return true; } diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java index 0d5cc2d262dfe..737eb608d82d6 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java +++ 
b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java @@ -44,6 +44,7 @@ import io.trino.spi.type.TypeManager; import io.trino.spi.type.TypeSignature; import io.trino.spi.type.TypeSignatureParameter; +import io.trino.spi.type.UuidType; import io.trino.spi.type.VarbinaryType; import io.trino.spi.type.VarcharType; import java.util.List; @@ -121,6 +122,10 @@ private Type parseJsonPrestoType(String fieldName, Schema schema) { LogicalType logicalType = schema.getLogicalType(); switch (type) { case STRING: + if (logicalType != null && logicalType.equals(LogicalTypes.uuid())) { + return UuidType.UUID; + } + return createUnboundedVarcharType(); case ENUM: return createUnboundedVarcharType(); case NULL: diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java index 9119ffed4e28f..7b550b7270f37 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarAuth.java @@ -38,6 +38,7 @@ import org.apache.pulsar.client.admin.PulsarAdminBuilder; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.AuthenticationFactory; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; import org.apache.pulsar.common.policies.data.AuthAction; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; @@ -63,6 +64,9 @@ public void setup() throws Exception { conf.setProperties(properties); conf.setSuperUserRoles(Sets.newHashSet(SUPER_USER_ROLE)); conf.setClusterName("c1"); + conf.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + conf.setBrokerClientAuthenticationParameters("token:" + AuthTokenUtils + .createToken(secretKey, SUPER_USER_ROLE, 
Optional.empty())); internalSetup(); admin.clusters().createCluster("c1", ClusterData.builder().build()); diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java index 4561282c67196..0dec76b3d4dec 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java @@ -19,6 +19,7 @@ package org.apache.pulsar.sql.presto.decoder; import java.math.BigDecimal; +import java.util.UUID; import lombok.Data; import java.util.List; @@ -55,6 +56,9 @@ public static enum TestEnum { public Map mapField; public CompositeRow compositeRow; + @org.apache.avro.reflect.AvroSchema("{\"type\":\"string\",\"logicalType\":\"uuid\"}") + public UUID uuidField; + public static class TestRow { public String stringField; public int intField; diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java index c4e7009b9465b..5f9df96619b9f 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java @@ -44,6 +44,7 @@ import io.trino.spi.type.Timestamps; import io.trino.spi.type.Type; import io.trino.spi.type.TypeSignatureParameter; +import io.trino.spi.type.UuidType; import io.trino.spi.type.VarcharType; import java.math.BigDecimal; import java.time.LocalDate; @@ -55,6 +56,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.UUID; import org.apache.pulsar.client.impl.schema.AvroSchema; import 
org.apache.pulsar.client.impl.schema.generic.GenericAvroRecord; import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema; @@ -90,6 +92,7 @@ public void testPrimitiveType() { message.longField = 222L; message.timestampField = System.currentTimeMillis(); message.enumField = DecoderTestMessage.TestEnum.TEST_ENUM_1; + message.uuidField = UUID.randomUUID(); LocalTime now = LocalTime.now(ZoneId.systemDefault()); message.timeField = now.toSecondOfDay() * 1000; @@ -137,6 +140,10 @@ public void testPrimitiveType() { PulsarColumnHandle timeFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), "timeField", TIME_MILLIS, false, false, "timeField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); checkValue(decodedRow, timeFieldColumnHandle, (long) message.timeField * Timestamps.PICOSECONDS_PER_MILLISECOND); + + PulsarColumnHandle uuidHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), + "uuidField", UuidType.UUID, false, false, "uuidField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); + checkValue(decodedRow, uuidHandle, UuidType.javaUuidToTrinoUuid(message.uuidField)); } @Test diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java index 4afad9b318fc5..32e71a53444cf 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java @@ -44,6 +44,7 @@ import io.trino.spi.type.Timestamps; import io.trino.spi.type.Type; import io.trino.spi.type.TypeSignatureParameter; +import io.trino.spi.type.UuidType; import io.trino.spi.type.VarcharType; import java.math.BigDecimal; import java.time.LocalDate; @@ -55,6 +56,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import 
java.util.UUID; import org.apache.pulsar.client.impl.schema.JSONSchema; import org.apache.pulsar.client.impl.schema.generic.GenericJsonRecord; import org.apache.pulsar.client.impl.schema.generic.GenericJsonSchema; @@ -98,6 +100,8 @@ public void testPrimitiveType() { LocalDate epoch = LocalDate.ofEpochDay(0); message.dateField = Math.toIntExact(ChronoUnit.DAYS.between(epoch, localDate)); + message.uuidField = UUID.randomUUID(); + ByteBuf payload = io.netty.buffer.Unpooled .copiedBuffer(schema.encode(message)); Map decodedRow = pulsarRowDecoder.decodeRow(payload).get(); @@ -137,6 +141,10 @@ public void testPrimitiveType() { PulsarColumnHandle timeFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), "timeField", TIME_MILLIS, false, false, "timeField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); checkValue(decodedRow, timeFieldColumnHandle, (long) message.timeField * Timestamps.PICOSECONDS_PER_MILLISECOND); + + PulsarColumnHandle uuidHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), + "uuidField", UuidType.UUID, false, false, "uuidField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); + checkValue(decodedRow, uuidHandle, message.uuidField.toString()); } @Test diff --git a/pulsar-testclient/pom.xml b/pulsar-testclient/pom.xml index ce03dd444e101..dce6cf0c19b63 100644 --- a/pulsar-testclient/pom.xml +++ b/pulsar-testclient/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - org.apache.pulsar - pulsar - 3.1.0-SNAPSHOT - .. 
- - - pulsar-testclient - Pulsar Test Client - Pulsar Test Client - - - - ${project.groupId} - testmocks - ${project.version} - test - - - - org.apache.zookeeper - zookeeper - - - ch.qos.logback - logback-core - - - ch.qos.logback - logback-classic - - - io.netty - netty-tcnative - - - - - - ${project.groupId} - pulsar-client-admin-original - ${project.version} - - - - ${project.groupId} - pulsar-client-original - ${project.version} - - - - ${project.groupId} - pulsar-client-messagecrypto-bc - ${project.version} - true - - - - ${project.groupId} - pulsar-broker - ${project.version} - - - - commons-configuration - commons-configuration - - - - com.beust - jcommander - compile - - - - org.hdrhistogram - HdrHistogram - - - - com.fasterxml.jackson.core - jackson-databind - - - - org.awaitility - awaitility - test - - - - com.github.tomakehurst - wiremock-jre8 - ${wiremock.version} - test - - - - - - - - - test-jar-dependencies - - - maven.test.skip - !true - - - - - ${project.groupId} - pulsar-broker - ${project.version} - test-jar - test - - - - - - - - - org.gaul - modernizer-maven-plugin - - true - 8 - - - - modernizer - verify - - modernizer - - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - + + 4.0.0 + + io.streamnative + pulsar + 3.1.0-SNAPSHOT + .. 
+ + pulsar-testclient + Pulsar Test Client + Pulsar Test Client + + + ${project.groupId} + testmocks + ${project.version} + test + + + org.apache.zookeeper + zookeeper + + + ch.qos.logback + logback-core + + + ch.qos.logback + logback-classic + + + io.netty + netty-tcnative + + + + + ${project.groupId} + pulsar-client-admin-original + ${project.version} + + + ${project.groupId} + pulsar-client-original + ${project.version} + + + ${project.groupId} + pulsar-client-messagecrypto-bc + ${project.version} + true + + + ${project.groupId} + pulsar-broker + ${project.version} + + + commons-configuration + commons-configuration + + + com.beust + jcommander + compile + + + org.hdrhistogram + HdrHistogram + + + com.fasterxml.jackson.core + jackson-databind + + + org.awaitility + awaitility + test + + + com.github.tomakehurst + wiremock-jre8 + ${wiremock.version} + test + + + + + + test-jar-dependencies + + + maven.test.skip + !true + + + + + ${project.groupId} + pulsar-broker + ${project.version} + test-jar + test + + + + + + + + org.gaul + modernizer-maven-plugin + + true + 8 + + + + modernizer + verify + + modernizer + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + verify + + check + + + + + + diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java index bbe535df5e289..3dc7008a41a49 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/LoadSimulationController.java @@ -60,7 +60,7 @@ */ public class LoadSimulationController { private static final Logger log = LoggerFactory.getLogger(LoadSimulationController.class); - private static final String QUOTA_ROOT = "/loadbalance/resource-quota/namespace"; + private static final String QUOTA_ROOT = "/loadbalance/resource-quota"; private static 
final String BUNDLE_DATA_ROOT = "/loadbalance/bundle-data"; // Input streams for each client to send commands through. diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java index f9e5d5ee7e6e1..b312ceb6e3eff 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerfClientUtils.java @@ -30,7 +30,6 @@ import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.client.api.SizeUnit; import org.apache.pulsar.common.util.DirectMemoryUtils; import org.slf4j.Logger; @@ -67,7 +66,6 @@ public static ClientBuilder createClientBuilderFromArguments(PerformanceBaseArgu throws PulsarClientException.UnsupportedAuthenticationException { ClientBuilder clientBuilder = PulsarClient.builder() - .memoryLimit(0, SizeUnit.BYTES) .serviceUrl(arguments.serviceURL) .connectionsPerBroker(arguments.maxConnections) .ioThreads(arguments.ioThreads) diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java index 63e3e2ec6fd23..8b5775f67818f 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java @@ -566,7 +566,7 @@ private static void runProducer(int producerId, } } // Send messages on all topics/producers - long totalSent = 0; + AtomicLong totalSent = new AtomicLong(0); AtomicLong numMessageSend = new AtomicLong(0); Semaphore numMsgPerTxnLimit = new Semaphore(arguments.numMessagesPerTransaction); while (true) { @@ -586,7 +586,7 @@ private static void runProducer(int producerId, } if 
(numMessages > 0) { - if (totalSent++ >= numMessages) { + if (totalSent.get() >= numMessages) { log.info("------------- DONE (reached the maximum number: {} of production) --------------" , numMessages); doneLatch.countDown(); @@ -604,7 +604,7 @@ private static void runProducer(int producerId, if (arguments.payloadFilename != null) { if (messageFormatter != null) { - payloadData = messageFormatter.formatMessage(arguments.producerName, totalSent, + payloadData = messageFormatter.formatMessage(arguments.producerName, totalSent.get(), payloadByteList.get(ThreadLocalRandom.current().nextInt(payloadByteList.size()))); } else { payloadData = payloadByteList.get( @@ -642,13 +642,13 @@ private static void runProducer(int producerId, if (msgKeyMode == MessageKeyGenerationMode.random) { messageBuilder.key(String.valueOf(ThreadLocalRandom.current().nextInt())); } else if (msgKeyMode == MessageKeyGenerationMode.autoIncrement) { - messageBuilder.key(String.valueOf(totalSent)); + messageBuilder.key(String.valueOf(totalSent.get())); } PulsarClient pulsarClient = client; messageBuilder.sendAsync().thenRun(() -> { bytesSent.add(payloadData.length); messagesSent.increment(); - + totalSent.incrementAndGet(); totalMessagesSent.increment(); totalBytesSent.add(payloadData.length); diff --git a/pulsar-transaction/common/pom.xml b/pulsar-transaction/common/pom.xml index b2594bf26ade5..6ab440cf883fe 100644 --- a/pulsar-transaction/common/pom.xml +++ b/pulsar-transaction/common/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar - pulsar-transaction-parent - 3.1.0-SNAPSHOT - - - pulsar-transaction-common - Pulsar Transaction :: Common - - + + 4.0.0 + + io.streamnative + pulsar-transaction-parent + 3.1.0-SNAPSHOT + + pulsar-transaction-common + Pulsar Transaction :: Common + - - - - org.gaul - modernizer-maven-plugin - - true - 8 - - - - modernizer - verify - - modernizer - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/src/main/resources/findbugsExclude.xml - 
- - - + + + + org.gaul + modernizer-maven-plugin + + true + 8 + + + + modernizer + verify + + modernizer + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + diff --git a/pulsar-transaction/common/src/main/resources/findbugsExclude.xml b/pulsar-transaction/common/src/main/resources/findbugsExclude.xml index 07f4609cfff29..47c3d73af5f06 100644 --- a/pulsar-transaction/common/src/main/resources/findbugsExclude.xml +++ b/pulsar-transaction/common/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - - org.apache.pulsar - pulsar-transaction-parent - 3.1.0-SNAPSHOT - - - pulsar-transaction-coordinator - Pulsar Transaction :: Coordinator - - - - - ${project.groupId} - pulsar-common - ${project.version} - - - - ${project.groupId} - managed-ledger - ${project.version} - - - - ${project.groupId} - testmocks - ${project.version} - test - - - - org.awaitility - awaitility - test - - - - - - - - org.gaul - modernizer-maven-plugin - - true - 8 - - - - modernizer - verify - - modernizer - - - - - - org.codehaus.mojo - properties-maven-plugin - - - initialize - - set-system-properties - - - - - proto_path - ${project.parent.parent.basedir} - - - proto_search_strategy - 2 - - - - - - - - com.github.splunk.lightproto - lightproto-maven-plugin - ${lightproto-maven-plugin.version} - - - - generate - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - + + 4.0.0 + + io.streamnative + pulsar-transaction-parent + 3.1.0-SNAPSHOT + + pulsar-transaction-coordinator + Pulsar Transaction :: Coordinator + + + ${project.groupId} + pulsar-common + ${project.version} + + + ${project.groupId} + managed-ledger + ${project.version} + + + ${project.groupId} + testmocks + ${project.version} + test + + + org.awaitility + awaitility + test + + + + + + org.gaul + modernizer-maven-plugin + + true + 8 + + + + modernizer + verify + + modernizer + + + + + + 
org.codehaus.mojo + properties-maven-plugin + + + initialize + + set-system-properties + + + + + proto_path + ${project.parent.parent.basedir} + + + proto_search_strategy + 2 + + + + + + + + com.github.splunk.lightproto + lightproto-maven-plugin + ${lightproto-maven-plugin.version} + + + + generate + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/src/main/resources/findbugsExclude.xml + + + + diff --git a/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml b/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml index a81fce11f4d21..f1cad02b28a88 100644 --- a/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml +++ b/pulsar-transaction/coordinator/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - pulsar-transaction-parent Pulsar Transaction :: Parent - common coordinator - - - com.github.spotbugs spotbugs-maven-plugin @@ -68,7 +63,5 @@ - - diff --git a/pulsar-websocket/pom.xml b/pulsar-websocket/pom.xml index bf8f38f2da1af..bb5e5707af6b8 100644 --- a/pulsar-websocket/pom.xml +++ b/pulsar-websocket/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- pulsar-websocket Pulsar WebSocket - ${project.groupId} pulsar-broker-common ${project.version} - ${project.groupId} pulsar-client-original ${project.version} - ${project.groupId} managed-ledger ${project.parent.version} test - ${project.groupId} pulsar-docs-tools @@ -63,57 +57,48 @@ - org.apache.commons commons-lang3 - org.glassfish.jersey.containers jersey-container-servlet-core - org.glassfish.jersey.containers jersey-container-servlet - org.glassfish.jersey.inject jersey-hk2 - com.google.code.gson gson - io.swagger swagger-core provided - com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider - - + org.eclipse.jetty.websocket websocket-api ${jetty.version} - - + org.eclipse.jetty.websocket websocket-server ${jetty.version} - + org.eclipse.jetty.websocket javax-websocket-client-impl @@ -128,9 +113,7 @@ org.hdrhistogram HdrHistogram - - @@ -150,7 +133,6 @@ - com.github.spotbugs spotbugs-maven-plugin @@ -168,7 +150,6 @@ - org.apache.maven.plugins maven-checkstyle-plugin diff --git a/pulsar-websocket/src/main/resources/findbugsExclude.xml b/pulsar-websocket/src/main/resources/findbugsExclude.xml index c96e63cdfccee..e8f324925188b 100644 --- a/pulsar-websocket/src/main/resources/findbugsExclude.xml +++ b/pulsar-websocket/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + @@ -204,4 +204,4 @@ - \ No newline at end of file + diff --git a/src/assembly-source-package.xml b/src/assembly-source-package.xml index 00f00bfe3a5c7..bae52588edcf6 100644 --- a/src/assembly-source-package.xml +++ b/src/assembly-source-package.xml @@ -31,7 +31,7 @@ . 
- + true @@ -39,14 +39,11 @@ src/*.py docker/pulsar/scripts/*.sh docker/pulsar/scripts/*.py - data/** logs/** - %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/).*${project.build.directory}.*] - - %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?maven-eclipse\.xml] %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.project] @@ -72,12 +68,10 @@ %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.externalToolBuilders(/.*)?] %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.deployables(/.*)?] %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?\.wtpmodules(/.*)?] - %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?cobertura\.ser] %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?pom\.xml\.versionsBackup] %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?dependency-reduced-pom\.xml] - %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?pom\.xml\.releaseBackup] %regex[(?!((?!${project.build.directory}/)[^/]+/)*src/)(.*/)?release\.properties] @@ -86,7 +80,7 @@ ${project.build.directory}/maven-shared-archive-resources/META-INF - + src diff --git a/src/check-binary-license.sh b/src/check-binary-license.sh index 3a6d266345f30..9c6a4d3223a70 100755 --- a/src/check-binary-license.sh +++ b/src/check-binary-license.sh @@ -61,7 +61,7 @@ EXIT=0 # Check all bundled jars are mentioned in LICENSE for J in $JARS; do - echo $J | grep -q "org.apache.pulsar" + echo $J | grep -q "io.streamnative" if [ $? == 0 ]; then continue fi @@ -104,7 +104,7 @@ if [ "$NO_PRESTO" -ne 1 ]; then for J in $JARS; do - echo $J | grep -q "org.apache.pulsar" + echo $J | grep -q "io.streamnative" if [ $? 
== 0 ]; then continue fi diff --git a/src/findbugs-exclude.xml b/src/findbugs-exclude.xml index 8f289b83a7be1..de4d3f7a93371 100644 --- a/src/findbugs-exclude.xml +++ b/src/findbugs-exclude.xml @@ -1,4 +1,4 @@ - + - - - - - - - - - - - - - + + + + + + + + + + + + diff --git a/src/idea-code-style.xml b/src/idea-code-style.xml index 810dee34a3494..b49c523c322b2 100644 --- a/src/idea-code-style.xml +++ b/src/idea-code-style.xml @@ -1,3 +1,4 @@ + - - - - - \ No newline at end of file + diff --git a/src/owasp-dependency-check-false-positives.xml b/src/owasp-dependency-check-false-positives.xml index 345be8f4d2c06..151752c98f7a1 100644 --- a/src/owasp-dependency-check-false-positives.xml +++ b/src/owasp-dependency-check-false-positives.xml @@ -1,4 +1,4 @@ - + - - + + file name: zookeeper-3.8.0.jar - ]]> + e395c1d8a71557b7569cc6a83487b2e30e2e58fe CVE-2021-28164 - file name: zookeeper-3.8.0.jar - ]]> + e395c1d8a71557b7569cc6a83487b2e30e2e58fe CVE-2021-29425 - file name: zookeeper-3.8.0.jar - ]]> + e395c1d8a71557b7569cc6a83487b2e30e2e58fe CVE-2021-34429 - file name: zookeeper-prometheus-metrics-3.8.0.jar - ]]> + 849e8ece2845cb0185d721233906d487a7f1e4cf CVE-2021-28164 - file name: zookeeper-prometheus-metrics-3.8.0.jar - ]]> + 849e8ece2845cb0185d721233906d487a7f1e4cf CVE-2021-29425 - file name: zookeeper-prometheus-metrics-3.8.0.jar - ]]> + 849e8ece2845cb0185d721233906d487a7f1e4cf CVE-2021-34429 - file name: zookeeper-jute-3.8.0.jar - ]]> + 6560f966bcf1aa78d27bcfa75fb6c4463a72c6c5 CVE-2021-28164 - file name: zookeeper-jute-3.8.0.jar - ]]> + 6560f966bcf1aa78d27bcfa75fb6c4463a72c6c5 CVE-2021-29425 - file name: zookeeper-jute-3.8.0.jar - ]]> + 6560f966bcf1aa78d27bcfa75fb6c4463a72c6c5 CVE-2021-34429 - - file name: debezium-connector-postgres-1.7.2.Final.jar - ]]> + 69c1edfa7d89531af511fcd07e8516fa450f746a CVE-2021-23214 - - - + - file name: mariadb-java-client-2.7.5.jar - ]]> + 9dd29797ecabe7d2e7fa892ec6713a5552cfcc59 CVE-2022-27376 CVE-2022-27377 @@ -162,43 +159,36 @@ 
CVE-2022-27386 CVE-2022-27387 - - file name: google-http-client-gson-1.41.0.jar - ]]> + 1a754a5dd672218a2ac667d7ff2b28df7a5a240e CVE-2022-25647 - commons-net is not used at all and therefore commons-net vulnerability CVE-2021-37533 is a false positive. CVE-2021-37533 - fredsmith utils library is not used at all. CVE-2021-4277 is a false positive. CVE-2021-4277 - It treat pulsar-io-kafka-connect-adaptor as a lib of Kafka, CVE-2021-25194 is a false positive. CVE-2023-25194 - It treat pulsar-io-kafka-connect-adaptor as a lib of Kafka, CVE-2021-34917 is a false positive. CVE-2022-34917 - yaml_project is not used at all. Any CVEs reported for yaml_project are false positives. cpe:/a:yaml_project:yaml - flat_project is not used at all. cpe:/a:flat_project:flat - \ No newline at end of file + diff --git a/src/owasp-dependency-check-suppressions.xml b/src/owasp-dependency-check-suppressions.xml index d5ddc28e884cb..7e0e54d9faab9 100644 --- a/src/owasp-dependency-check-suppressions.xml +++ b/src/owasp-dependency-check-suppressions.xml @@ -1,4 +1,4 @@ - + - - - Ignore netty CVEs in GRPC shaded Netty. - .*grpc-netty-shaded.* - cpe:/a:netty:netty - - - Suppress all pulsar-presto-distribution vulnerabilities - .*pulsar-presto-distribution-.* - .* - - - Suppress libthrift-0.12.0.jar vulnerabilities - org.apache.thrift:libthrift:0.12.0 - .* - - - - - + + Ignore netty CVEs in GRPC shaded Netty. + .*grpc-netty-shaded.* + cpe:/a:netty:netty + + + Suppress all pulsar-presto-distribution vulnerabilities + .*pulsar-presto-distribution-.* + .* + + + Suppress libthrift-0.12.0.jar vulnerabilities + org.apache.thrift:libthrift:0.12.0 + .* + + + + file name: msgpack-core-0.9.0.jar - ]]> - 87d9ce0b22de48428fa32bb8ad476e18b6969548 - CVE-2022-41719 - - - - - + 87d9ce0b22de48428fa32bb8ad476e18b6969548 + CVE-2022-41719 + + + + file name: elasticsearch-java-8.1.0.jar CVE-2022-23712 is only related to Elastic server. 
- ]]> - edf5be04cbc2eafc51540ba33f9536e788b9d60b - CVE-2022-23712 - - - + edf5be04cbc2eafc51540ba33f9536e788b9d60b + CVE-2022-23712 + + + file name: elasticsearch-rest-client-8.1.0.jar CVE-2022-23712 is only related to Elastic server. - ]]> - 10e7aa09f10955a074c0a574cb699344d2745df1 - CVE-2022-23712 - - - - - + 10e7aa09f10955a074c0a574cb699344d2745df1 + CVE-2022-23712 + + + + file name: kotlin-stdlib-common-1.4.32.jar - ]]> - ef50bfa2c0491a11dcc35d9822edbfd6170e1ea2 - cpe:/a:jetbrains:kotlin - - - + ef50bfa2c0491a11dcc35d9822edbfd6170e1ea2 + cpe:/a:jetbrains:kotlin + + + file name: kotlin-stdlib-jdk7-1.4.32.jar - ]]> - 3546900a3ebff0c43f31190baf87a9220e37b7ea - CVE-2022-24329 - - - + 3546900a3ebff0c43f31190baf87a9220e37b7ea + CVE-2022-24329 + + + file name: kotlin-stdlib-jdk8-1.4.32.jar - ]]> - 3302f9ec8a5c1ed220781dbd37770072549bd333 - CVE-2022-24329 - - - + 3302f9ec8a5c1ed220781dbd37770072549bd333 + CVE-2022-24329 + + + file name: kotlin-stdlib-1.4.32.jar - ]]> - 461367948840adbb0839c51d91ed74ef4a9ccb52 - CVE-2022-24329 - - - - - + 461367948840adbb0839c51d91ed74ef4a9ccb52 + CVE-2022-24329 + + + + file name: canal.client-1.1.5.jar (shaded: com.google.guava:guava:22.0) - ]]> - b87878db57d5cfc2ca7d3972cc8f7486bf02fbca - CVE-2018-10237 - - - + b87878db57d5cfc2ca7d3972cc8f7486bf02fbca + CVE-2018-10237 + + + file name: canal.client-1.1.5.jar (shaded: com.google.guava:guava:22.0) - ]]> - b87878db57d5cfc2ca7d3972cc8f7486bf02fbca - CVE-2020-8908 - - - + b87878db57d5cfc2ca7d3972cc8f7486bf02fbca + CVE-2020-8908 + + + file name: canal.client-1.1.5.jar (shaded: com.google.guava:guava:32.1.1) CVE cannot take effect. 
Already covered by PR https://github.com/apache/pulsar/pull/20699 - ]]> - b87878db57d5cfc2ca7d3972cc8f7486bf02fbca - CVE-2023-2976 - - - - + b87878db57d5cfc2ca7d3972cc8f7486bf02fbca + CVE-2023-2976 + + + + file name: avro-1.10.2.jar - ]]> - a65aaa91c1aeceb3dd4859dbb9765d1c2063f5a2 - CVE-2021-43045 - - - + a65aaa91c1aeceb3dd4859dbb9765d1c2063f5a2 + CVE-2021-43045 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2018-14668 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2018-14668 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2018-14669 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2018-14669 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2018-14670 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2018-14670 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2018-14671 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2018-14671 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2018-14672 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2018-14672 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2019-15024 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2019-15024 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2019-16535 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2019-16535 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2019-18657 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2019-18657 + + + file name: clickhouse-jdbc-0.3.2.jar - ]]> - fa9a1ccda7d78edb51a3a33d3493566092786a30 - CVE-2021-25263 - - - + fa9a1ccda7d78edb51a3a33d3493566092786a30 + CVE-2021-25263 + + + file name: clickhouse-jdbc-0.4.6-all.jar (shaded: 
com.google.guava:guava:32.1.1) CVE cannot take effect. Already covered by PR https://github.com/apache/pulsar/pull/20699 - ]]> - d3b929509399a698915b24ff47db781d0c526760 - CVE-2023-2976 - - - + d3b929509399a698915b24ff47db781d0c526760 + CVE-2023-2976 + + + file name: logback-core-1.1.3.jar - ]]> - e3c02049f2dbbc764681b40094ecf0dcbc99b157 - cpe:/a:qos:logback - - - + e3c02049f2dbbc764681b40094ecf0dcbc99b157 + cpe:/a:qos:logback + + + file name: rocketmq-acl-4.5.2.jar - ]]> - 0e2bd9c162280cd79c2ea0f67f174ee5d7b84ddd - cpe:/a:apache:rocketmq - - - - ^pkg:maven/org\.springframework/spring.*$ - CVE-2016-1000027 - - - + 0e2bd9c162280cd79c2ea0f67f174ee5d7b84ddd + cpe:/a:apache:rocketmq + + + Ignored since we are not vulnerable + ^pkg:maven/org\.springframework/spring.*$ + CVE-2016-1000027 + + + file name: logback-classic-1.1.3.jar - ]]> - d90276fff414f06cb375f2057f6778cd63c6082f - cpe:/a:qos:logback - - - + d90276fff414f06cb375f2057f6778cd63c6082f + cpe:/a:qos:logback + + + file name: logback-core-1.1.3.jar - ]]> - e3c02049f2dbbc764681b40094ecf0dcbc99b157 - CVE-2017-5929 - - - + e3c02049f2dbbc764681b40094ecf0dcbc99b157 + CVE-2017-5929 + + + file name: logback-classic-1.1.3.jar - ]]> - d90276fff414f06cb375f2057f6778cd63c6082f - CVE-2017-5929 - - - + d90276fff414f06cb375f2057f6778cd63c6082f + CVE-2017-5929 + + + file name: logback-classic-1.1.3.jar - ]]> - d90276fff414f06cb375f2057f6778cd63c6082f - CVE-2021-42550 - - - - Ignore etdc CVEs in jetcd - .*jetcd.* - cpe:/a:etcd:etcd - - - Ignore etdc CVEs in jetcd - .*jetcd.* - cpe:/a:redhat:etcd - - - Ignore grpc CVEs in jetcd - .*jetcd-grpc.* - cpe:/a:grpc:grpc - - - - - + d90276fff414f06cb375f2057f6778cd63c6082f + CVE-2021-42550 + + + Ignore etdc CVEs in jetcd + .*jetcd.* + cpe:/a:etcd:etcd + + + Ignore etdc CVEs in jetcd + .*jetcd.* + cpe:/a:redhat:etcd + + + Ignore grpc CVEs in jetcd + .*jetcd-grpc.* + cpe:/a:grpc:grpc + + + + file name: bc-fips-1.0.2.jar - ]]> - 4fb5db5f03d00f6a94e43b78d097978190e4abb2 - CVE-2020-26939 - - 
- + 4fb5db5f03d00f6a94e43b78d097978190e4abb2 + CVE-2020-26939 + + + file name: bcpkix-fips-1.0.2.jar - ]]> - 543bc7a08cdba0172e95e536b5f7ca61f021253d - CVE-2020-15522 - - - + 543bc7a08cdba0172e95e536b5f7ca61f021253d + CVE-2020-15522 + + + file name: bcpkix-fips-1.0.2.jar - ]]> - 543bc7a08cdba0172e95e536b5f7ca61f021253d - CVE-2020-26939 - - - - - + 543bc7a08cdba0172e95e536b5f7ca61f021253d + CVE-2020-26939 + + + + file name: openstack-swift-2.5.0.jar - ]]> - d99d0eab2e01d69d8a326fc152427fbd759af88a - CVE-2016-0738 - - - + d99d0eab2e01d69d8a326fc152427fbd759af88a + CVE-2016-0738 + + + file name: openstack-swift-2.5.0.jar - ]]> - d99d0eab2e01d69d8a326fc152427fbd759af88a - CVE-2017-16613 - - - + d99d0eab2e01d69d8a326fc152427fbd759af88a + CVE-2017-16613 + + + file name: openstack-keystone-2.5.0.jar - ]]> - a7e89bd278fa8be9fa604dda66d1606de5530797 - CVE-2018-14432 - - - + a7e89bd278fa8be9fa604dda66d1606de5530797 + CVE-2018-14432 + + + file name: openstack-keystone-2.5.0.jar - ]]> - a7e89bd278fa8be9fa604dda66d1606de5530797 - CVE-2018-20170 - - - + a7e89bd278fa8be9fa604dda66d1606de5530797 + CVE-2018-20170 + + + file name: openstack-keystone-2.5.0.jar - ]]> - a7e89bd278fa8be9fa604dda66d1606de5530797 - CVE-2020-12689 - - - + a7e89bd278fa8be9fa604dda66d1606de5530797 + CVE-2020-12689 + + + file name: openstack-keystone-2.5.0.jar - ]]> - a7e89bd278fa8be9fa604dda66d1606de5530797 - CVE-2020-12690 - - - + a7e89bd278fa8be9fa604dda66d1606de5530797 + CVE-2020-12690 + + + file name: openstack-keystone-2.5.0.jar - ]]> - a7e89bd278fa8be9fa604dda66d1606de5530797 - CVE-2020-12691 - - - + a7e89bd278fa8be9fa604dda66d1606de5530797 + CVE-2020-12691 + + + file name: openstack-keystone-2.5.0.jar - ]]> - a7e89bd278fa8be9fa604dda66d1606de5530797 - CVE-2020-12692 - - - + a7e89bd278fa8be9fa604dda66d1606de5530797 + CVE-2020-12692 + + + file name: openstack-keystone-2.5.0.jar - ]]> - a7e89bd278fa8be9fa604dda66d1606de5530797 - CVE-2021-3563 - - - - - + file name: 
org.apache.pulsar:pulsar-io-solr:2.10.0-SNAPSHOT - ]]> - ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$ - cpe:/a:apache:pulsar - - - + ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$ + cpe:/a:apache:pulsar + + + file name: org.apache.pulsar:pulsar-io-solr:2.10.0-SNAPSHOT - ]]> - ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$ - cpe:/a:apache:solr - - - - - + ^pkg:maven/org\.apache\.pulsar/pulsar\-io\-solr@.*-SNAPSHOT$ + cpe:/a:apache:solr + + + + file name: debezium-connector-mysql-1.9.7.Final.jar - ]]> - 74c623b4a9b231e2f0e8f6053b38abd3bc487ce2 - CVE-2017-15945 - - - + 74c623b4a9b231e2f0e8f6053b38abd3bc487ce2 + CVE-2017-15945 + + + file name: mysql-binlog-connector-java-0.27.2.jar - ]]> - 23294cd730e29c00b8ddfbde517dfc955aba090f - CVE-2017-15945 - - - + 23294cd730e29c00b8ddfbde517dfc955aba090f + CVE-2017-15945 + + + file name: debezium-connector-postgres-1.9.7.Final.jar - ]]> - 300ff0bbf795643e914b7c8a6d6ba812a8354d62 - CVE-2015-0241 - CVE-2015-0242 - CVE-2015-0243 - CVE-2015-0244 - CVE-2015-3166 - CVE-2015-3167 - CVE-2016-0766 - CVE-2016-0768 - CVE-2016-0773 - CVE-2016-5423 - CVE-2016-5424 - CVE-2016-7048 - CVE-2017-14798 - CVE-2017-7484 - CVE-2018-1115 - CVE-2019-10127 - CVE-2019-10128 - CVE-2019-10210 - CVE-2019-10211 - CVE-2020-25694 - CVE-2020-25695 - CVE-2021-23214 - - - + 300ff0bbf795643e914b7c8a6d6ba812a8354d62 + CVE-2015-0241 + CVE-2015-0242 + CVE-2015-0243 + CVE-2015-0244 + CVE-2015-3166 + CVE-2015-3167 + CVE-2016-0766 + CVE-2016-0768 + CVE-2016-0773 + CVE-2016-5423 + CVE-2016-5424 + CVE-2016-7048 + CVE-2017-14798 + CVE-2017-7484 + CVE-2018-1115 + CVE-2019-10127 + CVE-2019-10128 + CVE-2019-10210 + CVE-2019-10211 + CVE-2020-25694 + CVE-2020-25695 + CVE-2021-23214 + + + file name: protostream-types-4.4.1.Final.jar - ]]> - 29b45ebea1e4ce62ab3ec5eb76fa9771f98941b0 - CVE-2016-0750 - CVE-2017-15089 - CVE-2017-2638 - CVE-2019-10158 - CVE-2019-10174 - CVE-2020-25711 - - - + 29b45ebea1e4ce62ab3ec5eb76fa9771f98941b0 + 
CVE-2016-0750 + CVE-2017-15089 + CVE-2017-2638 + CVE-2019-10158 + CVE-2019-10174 + CVE-2020-25711 + + + file name: mariadb-java-client-2.7.5.jar - ]]> - 9dd29797ecabe7d2e7fa892ec6713a5552cfcc59 - CVE-2020-28912 - CVE-2021-46669 - CVE-2021-46666 - CVE-2021-46667 - - - - + 9dd29797ecabe7d2e7fa892ec6713a5552cfcc59 + CVE-2020-28912 + CVE-2021-46669 + CVE-2021-46666 + CVE-2021-46667 + + + + file name: cassandra-driver-core-3.11.2.jar - ]]> - e0aad9f8611e710b9a0ce49747f7465ce07d8404 - CVE-2020-17516 - CVE-2021-44521 - - - + e0aad9f8611e710b9a0ce49747f7465ce07d8404 + CVE-2020-17516 + CVE-2021-44521 + + + The vulnerable method is deprecated in Guava, but isn't removed. It's necessary to suppress this CVE. See https://github.com/google/guava/issues/4011 - ]]> - CVE-2020-8908 - - - + CVE-2020-8908 + + + This is a false positive in jackson-databind. See https://github.com/FasterXML/jackson-databind/issues/3972#issuecomment-1596604021 - ]]> - CVE-2023-35116 - + + CVE-2023-35116 + + + + This is a false positive in avro-protobuf. The vulnerability is in Hamba avro golang library. + + CVE-2023-37475 + + + + This CVE can be suppressed since it is covered in Pulsar by hostname verification changes made in https://github.com/apache/pulsar/pull/15824. + + CVE-2023-4586 + diff --git a/src/settings.xml b/src/settings.xml index 80ec2f40620e7..8fc2f13a37a59 100644 --- a/src/settings.xml +++ b/src/settings.xml @@ -1,3 +1,4 @@ + apache.releases.https @@ -34,7 +34,6 @@ ${env.APACHE_PASSWORD} - apache diff --git a/structured-event-log/pom.xml b/structured-event-log/pom.xml index c42a75397d0ca..414d37b217392 100644 --- a/structured-event-log/pom.xml +++ b/structured-event-log/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. 
- structured-event-log Structured event logger - org.slf4j slf4j-api - org.apache.logging.log4j @@ -60,7 +56,6 @@ test - @@ -78,5 +73,4 @@ - diff --git a/testmocks/pom.xml b/testmocks/pom.xml index 30a29c7e3f9c4..54c705e17f190 100644 --- a/testmocks/pom.xml +++ b/testmocks/pom.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - pulsar - org.apache.pulsar + io.streamnative 3.1.0-SNAPSHOT - testmocks jar Pulsar Test Mocks - - org.apache.zookeeper zookeeper @@ -52,29 +48,23 @@ - org.apache.bookkeeper bookkeeper-server - org.apache.commons commons-lang3 - org.testng testng - org.objenesis objenesis - - @@ -92,5 +82,4 @@ - diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java b/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java index d023427e3be31..dd33c2c4532bf 100644 --- a/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java @@ -52,7 +52,6 @@ public BookKeeperTestClient(ClientConfiguration conf, ZooKeeper zkc) throws IOException, InterruptedException, BKException { super(conf, zkc, null, new UnpooledByteBufAllocator(false), NullStatsLogger.INSTANCE, null, null, null); - this.statsProvider = statsProvider; } public BookKeeperTestClient(ClientConfiguration conf) diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java b/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java index f0d279ef25050..998ef73fbd3e9 100644 --- a/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockBookKeeper.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.client; +import static com.google.common.base.Preconditions.checkArgument; import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.Arrays; @@ -364,6 +365,7 @@ public synchronized CompletableFuture 
promiseAfter(int steps, List= 0, "The delay time must not be negative."); addEntryDelaysMillis.add(unit.toMillis(delay)); } diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java b/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java index 4d08a7f80df5b..cf08cc635106e 100644 --- a/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/TestStatsProvider.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.client; +import static com.google.common.base.Preconditions.checkArgument; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; @@ -65,6 +66,7 @@ public void addCount(long delta) { @Override public void addLatency(long eventLatency, TimeUnit unit) { + checkArgument(eventLatency >= 0, "The event latency must not be negative."); long valueMillis = unit.toMillis(eventLatency); updateMax(val.addAndGet(valueMillis)); } diff --git a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java index 0c0f7ec9ed1d4..f32036e53f001 100644 --- a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java +++ b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java @@ -1114,7 +1114,7 @@ Optional programmedFailure(Op op, String path) { Optional failure = failures.stream().filter(f -> f.predicate.test(op, path)).findFirst(); if (failure.isPresent()) { failures.remove(failure.get()); - return Optional.of(failure.get().failReturnCode); + return Optional.ofNullable(failure.get().failReturnCode); } else { return Optional.empty(); } @@ -1131,6 +1131,18 @@ public void failConditional(KeeperException.Code rc, BiPredicate pre failures.add(new Failure(rc, predicate)); } + public void delay(long millis, BiPredicate predicate) { + failures.add(new Failure(null, (op, s) -> { + if (predicate.test(op, s)) { + try { + 
Thread.sleep(millis); + } catch (InterruptedException e) {} + return true; + } + return false; + })); + } + public void setAlwaysFail(KeeperException.Code rc) { this.alwaysFail.set(rc); } diff --git a/tests/bc_2_0_0/pom.xml b/tests/bc_2_0_0/pom.xml index eacf4623edff5..77214fe9de624 100644 --- a/tests/bc_2_0_0/pom.xml +++ b/tests/bc_2_0_0/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - org.apache.pulsar.tests - tests-parent - 3.1.0-SNAPSHOT - - - bc_2_0_0 - jar - Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.0-rc1-incubating - - - - - org.apache.pulsar - pulsar-client - 2.0.0-rc1-incubating - test - - - - org.testcontainers - testcontainers - test - - - - - + + 4.0.0 + + io.streamnative.tests + tests-parent + 3.1.0-SNAPSHOT + + bc_2_0_0 + jar + Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.0-rc1-incubating + + + org.apache.pulsar + pulsar-client + 2.0.0-rc1-incubating + test + + + org.testcontainers + testcontainers + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + true + + ${project.version} + ${project.build.directory} + + + + + + + + BackwardsCompatTests + + + BackwardsCompatTests + + + - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - - true - - ${project.version} - ${project.build.directory} - - - - - - - - - BackwardsCompatTests - - - BackwardsCompatTests - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G + + org.apache.maven.plugins + maven-surefire-plugin + + ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G -Dio.netty.leakDetectionLevel=advanced ${test.additional.args} - false - - src/test/resources/pulsar.xml - - 1 - - - - - - + false + + src/test/resources/pulsar.xml + + 1 + + + + + + diff --git 
a/tests/bc_2_0_0/src/test/resources/pulsar.xml b/tests/bc_2_0_0/src/test/resources/pulsar.xml index 43dfaea15813c..2e9d57ed72832 100644 --- a/tests/bc_2_0_0/src/test/resources/pulsar.xml +++ b/tests/bc_2_0_0/src/test/resources/pulsar.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/bc_2_0_1/pom.xml b/tests/bc_2_0_1/pom.xml index d624c66170d56..5c3e244d0b41f 100644 --- a/tests/bc_2_0_1/pom.xml +++ b/tests/bc_2_0_1/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - org.apache.pulsar.tests - tests-parent - 3.1.0-SNAPSHOT - - - bc_2_0_1 - jar - Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.1-incubating - - - - - org.apache.pulsar - pulsar-client - 2.0.1-incubating - test - - - - org.testcontainers - testcontainers - test - - - - - + + 4.0.0 + + io.streamnative.tests + tests-parent + 3.1.0-SNAPSHOT + + bc_2_0_1 + jar + Apache Pulsar :: Tests :: Backwards Client Compatibility 2.0.1-incubating + + + org.apache.pulsar + pulsar-client + 2.0.1-incubating + test + + + org.testcontainers + testcontainers + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + true + + ${project.version} + ${project.build.directory} + + + + + + + + BackwardsCompatTests + + + BackwardsCompatTests + + + - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - - true - - ${project.version} - ${project.build.directory} - - - - - - - - - BackwardsCompatTests - - - BackwardsCompatTests - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G + + org.apache.maven.plugins + maven-surefire-plugin + + ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G -Dio.netty.leakDetectionLevel=advanced ${test.additional.args} - false - - src/test/resources/pulsar.xml - - 
1 - - - - - - + false + + src/test/resources/pulsar.xml + + 1 + + + + + + diff --git a/tests/bc_2_0_1/src/test/resources/pulsar.xml b/tests/bc_2_0_1/src/test/resources/pulsar.xml index 43dfaea15813c..2e9d57ed72832 100644 --- a/tests/bc_2_0_1/src/test/resources/pulsar.xml +++ b/tests/bc_2_0_1/src/test/resources/pulsar.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/bc_2_6_0/pom.xml b/tests/bc_2_6_0/pom.xml index 2cd8e7f786809..b53fbe28ba865 100644 --- a/tests/bc_2_6_0/pom.xml +++ b/tests/bc_2_6_0/pom.xml @@ -1,4 +1,4 @@ - + - - - org.apache.pulsar.tests - tests-parent - 3.1.0-SNAPSHOT - - 4.0.0 - - bc_2_6_0 - jar - Apache Pulsar :: Tests :: Backwards Client Compatibility 2.6.0 - - - - - com.google.code.gson - gson - test - - - - org.apache.pulsar - pulsar-client - 2.6.0 - test - - - - org.testcontainers - testcontainers - test - - - - - + + + io.streamnative.tests + tests-parent + 3.1.0-SNAPSHOT + + 4.0.0 + bc_2_6_0 + jar + Apache Pulsar :: Tests :: Backwards Client Compatibility 2.6.0 + + + com.google.code.gson + gson + test + + + org.apache.pulsar + pulsar-client + 2.6.0 + test + + + org.testcontainers + testcontainers + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + true + + ${project.version} + ${project.build.directory} + + + + + + + + BackwardsCompatTests + + + BackwardsCompatTests + + + - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - - true - - ${project.version} - ${project.build.directory} - - - - - - - - - BackwardsCompatTests - - - BackwardsCompatTests - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G + + org.apache.maven.plugins + maven-surefire-plugin + + ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G 
-XX:MaxDirectMemorySize=8G -Dio.netty.leakDetectionLevel=advanced ${test.additional.args} - false - - src/test/resources/backwards-client.xml - - 1 - - - - - - - - + false + + src/test/resources/backwards-client.xml + + 1 + + + + + + diff --git a/tests/bc_2_6_0/src/test/resources/backwards-client.xml b/tests/bc_2_6_0/src/test/resources/backwards-client.xml index 43dfaea15813c..2e9d57ed72832 100644 --- a/tests/bc_2_6_0/src/test/resources/backwards-client.xml +++ b/tests/bc_2_6_0/src/test/resources/backwards-client.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/docker-images/java-test-functions/pom.xml b/tests/docker-images/java-test-functions/pom.xml index 0afb21c6a03a2..0197542a424c5 100644 --- a/tests/docker-images/java-test-functions/pom.xml +++ b/tests/docker-images/java-test-functions/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar.tests + io.streamnative.tests docker-images 3.1.0-SNAPSHOT @@ -30,12 +30,12 @@ Apache Pulsar :: Tests :: Docker Images :: Java Test Functions - org.apache.pulsar + io.streamnative pulsar-io-core ${project.version} - org.apache.pulsar + io.streamnative pulsar-functions-api ${project.version} @@ -59,7 +59,6 @@ jar - docker @@ -68,15 +67,13 @@ integrationTests - - org.apache.pulsar + io.streamnative pulsar-functions-api-examples ${project.version} - @@ -91,7 +88,7 @@ - org.apache.pulsar:pulsar-functions-api-examples + io.streamnative:pulsar-functions-api-examples @@ -102,5 +99,4 @@ - diff --git a/tests/docker-images/java-test-image/pom.xml b/tests/docker-images/java-test-image/pom.xml index 84b02ad137cfe..033bf22f3d6e7 100644 --- a/tests/docker-images/java-test-image/pom.xml +++ b/tests/docker-images/java-test-image/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar.tests + io.streamnative.tests docker-images 3.1.0-SNAPSHOT @@ -29,7 +29,6 @@ java-test-image Apache Pulsar :: Tests :: Docker Images :: Java Test Image pom - docker @@ -45,12 +44,12 @@ - org.apache.pulsar.tests + 
io.streamnative.tests java-test-functions ${project.parent.version} - org.apache.pulsar + io.streamnative pulsar-server-distribution ${project.parent.version} bin @@ -78,7 +77,7 @@ - org.apache.pulsar.tests + io.streamnative.tests java-test-functions ${project.parent.version} jar @@ -87,7 +86,7 @@ java-test-functions.jar - org.apache.pulsar + io.streamnative pulsar-server-distribution ${project.parent.version} bin diff --git a/tests/docker-images/java-test-plugins/pom.xml b/tests/docker-images/java-test-plugins/pom.xml index 16b0ec1911793..4b1817453bb79 100644 --- a/tests/docker-images/java-test-plugins/pom.xml +++ b/tests/docker-images/java-test-plugins/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar.tests + io.streamnative.tests docker-images 3.1.0-SNAPSHOT @@ -31,7 +31,7 @@ jar - org.apache.pulsar + io.streamnative pulsar-broker ${project.version} provided @@ -45,5 +45,4 @@ - diff --git a/tests/docker-images/latest-version-image/Dockerfile b/tests/docker-images/latest-version-image/Dockerfile index 99672773dcbc8..602f917700b65 100644 --- a/tests/docker-images/latest-version-image/Dockerfile +++ b/tests/docker-images/latest-version-image/Dockerfile @@ -40,10 +40,6 @@ FROM apachepulsar/pulsar:latest # However, any processes exec'ing into the containers will run as root, by default. USER root -# We need to define the user in order for supervisord to work correctly -# We don't need a user defined in the public docker image, though. 
-RUN adduser -u 10000 --gid 0 --disabled-login --disabled-password --gecos '' pulsar - RUN rm -rf /var/lib/apt/lists/* && apt update RUN apt-get clean && apt-get update && apt-get install -y supervisor vim procps curl diff --git a/tests/docker-images/latest-version-image/conf/functions_worker.conf b/tests/docker-images/latest-version-image/conf/functions_worker.conf index 8072639a0d4a2..6feb660231cec 100644 --- a/tests/docker-images/latest-version-image/conf/functions_worker.conf +++ b/tests/docker-images/latest-version-image/conf/functions_worker.conf @@ -22,7 +22,7 @@ autostart=false redirect_stderr=true stdout_logfile=/var/log/pulsar/functions_worker.log directory=/pulsar -environment=PULSAR_MEM="-Xmx128M",PULSAR_GC="-XX:+UseZGC" +environment=PULSAR_MEM="-Xmx128M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/pulsar/logs/functions",PULSAR_GC="-XX:+UseZGC" command=/pulsar/bin/pulsar functions-worker user=pulsar stopwaitsecs=15 \ No newline at end of file diff --git a/tests/docker-images/latest-version-image/pom.xml b/tests/docker-images/latest-version-image/pom.xml index 2aac3b14be2ea..843932076d4cf 100644 --- a/tests/docker-images/latest-version-image/pom.xml +++ b/tests/docker-images/latest-version-image/pom.xml @@ -1,3 +1,4 @@ + - + - org.apache.pulsar.tests + io.streamnative.tests docker-images 3.1.0-SNAPSHOT @@ -29,7 +29,6 @@ latest-version-image Apache Pulsar :: Tests :: Docker Images :: Latest Version Testing pom - docker @@ -40,17 +39,17 @@ - org.apache.pulsar.tests + io.streamnative.tests java-test-functions ${project.parent.version} - org.apache.pulsar.tests + io.streamnative.tests java-test-plugins ${project.parent.version} - org.apache.pulsar + io.streamnative pulsar-all-docker-image ${project.parent.version} pom @@ -77,7 +76,7 @@ - org.apache.pulsar.tests + io.streamnative.tests java-test-functions ${project.parent.version} jar @@ -86,7 +85,7 @@ java-test-functions.jar - org.apache.pulsar.tests + io.streamnative.tests java-test-plugins 
${project.parent.version} jar diff --git a/tests/docker-images/pom.xml b/tests/docker-images/pom.xml index ce45569c2f1a9..a0426a7d6e925 100644 --- a/tests/docker-images/pom.xml +++ b/tests/docker-images/pom.xml @@ -1,4 +1,4 @@ - + - + pom 4.0.0 - org.apache.pulsar.tests + io.streamnative.tests tests-parent 3.1.0-SNAPSHOT diff --git a/tests/integration/pom.xml b/tests/integration/pom.xml index 40e97f5a6ebc5..9af50045a52d8 100644 --- a/tests/integration/pom.xml +++ b/tests/integration/pom.xml @@ -1,4 +1,4 @@ - + - + 4.0.0 - org.apache.pulsar.tests + io.streamnative.tests tests-parent 3.1.0-SNAPSHOT - integration jar Apache Pulsar :: Tests :: Integration - pulsar.xml 4.1.2 - com.google.code.gson @@ -44,37 +40,37 @@ test - org.apache.pulsar + io.streamnative pulsar-functions-api-examples ${project.version} test - org.apache.pulsar + io.streamnative pulsar-broker ${project.version} test - org.apache.pulsar + io.streamnative pulsar-common ${project.version} test - org.apache.pulsar + io.streamnative pulsar-client-original ${project.version} test - org.apache.pulsar + io.streamnative pulsar-client-admin-original ${project.version} test - org.apache.pulsar + io.streamnative managed-ledger ${project.version} test @@ -96,31 +92,27 @@ test - org.apache.pulsar + io.streamnative pulsar-io-kafka ${project.version} test - org.testcontainers mysql test - org.postgresql postgresql ${postgresql-jdbc.version} runtime - org.testcontainers postgresql test - com.github.docker-java docker-java-core @@ -131,72 +123,60 @@ bcpkix-jdk18on test - - org.apache.pulsar + io.streamnative pulsar-io-jdbc-postgres ${project.version} test - com.fasterxml.jackson.core jackson-databind test - com.fasterxml.jackson.dataformat jackson-dataformat-yaml test - org.opensearch.client opensearch-rest-high-level-client test - co.elastic.clients elasticsearch-java test - org.testcontainers elasticsearch test - - com.rabbitmq amqp-client ${rabbitmq-client.version} test - joda-time joda-time test - io.trino 
trino-jdbc ${trino.version} test - org.awaitility awaitility test - - + org.testcontainers localstack @@ -213,7 +193,6 @@ aws-java-sdk-core test - org.testcontainers @@ -226,9 +205,7 @@ ${mongo-reactivestreams.version} test - - @@ -269,7 +246,6 @@ - integrationTests diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java index 7e8f55429244b..f13a4dcfbdceb 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java @@ -402,7 +402,7 @@ public void testSchemaCLI() throws Exception { "upload", topicName, "-f", - "/pulsar/conf/schema_example.conf" + "/pulsar/conf/schema_example.json" ); result.assertNoOutput(); diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java index 616d45554d75c..a51397050b97f 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BrokerContainer.java @@ -28,8 +28,13 @@ public class BrokerContainer extends PulsarContainer { public static final String NAME = "pulsar-broker"; public BrokerContainer(String clusterName, String hostName) { - super(clusterName, hostName, hostName, "bin/run-broker.sh", BROKER_PORT, BROKER_PORT_TLS, - BROKER_HTTP_PORT, BROKER_HTTPS_PORT, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME); + this(clusterName, hostName, false); + } + + public BrokerContainer(String clusterName, String hostName, boolean enableTls) { + super(clusterName, hostName, hostName, "bin/run-broker.sh", BROKER_PORT, + enableTls ? BROKER_PORT_TLS : 0, BROKER_HTTP_PORT, + enableTls ? 
BROKER_HTTPS_PORT : 0, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME); tailContainerLog(); } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java index 53283447378f5..f3926878f37c5 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/ProxyContainer.java @@ -28,8 +28,13 @@ public class ProxyContainer extends PulsarContainer { public static final String NAME = "pulsar-proxy"; public ProxyContainer(String clusterName, String hostName) { - super(clusterName, hostName, hostName, "bin/run-proxy.sh", BROKER_PORT, BROKER_PORT_TLS, BROKER_HTTP_PORT, - BROKER_HTTPS_PORT, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME); + this(clusterName, hostName, false); + } + + public ProxyContainer(String clusterName, String hostName, boolean enableTls) { + super(clusterName, hostName, hostName, "bin/run-proxy.sh", BROKER_PORT, + enableTls ? BROKER_PORT_TLS : 0, BROKER_HTTP_PORT, + enableTls ? 
BROKER_HTTPS_PORT : 0, DEFAULT_HTTP_PATH, DEFAULT_IMAGE_NAME); } @Override diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/loadbalance/ExtensibleLoadManagerTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/loadbalance/ExtensibleLoadManagerTest.java index 057039edc3be2..af14ef97f85c3 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/loadbalance/ExtensibleLoadManagerTest.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/loadbalance/ExtensibleLoadManagerTest.java @@ -38,6 +38,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; import lombok.extern.slf4j.Slf4j; @@ -53,6 +54,7 @@ import org.apache.pulsar.tests.integration.containers.BrokerContainer; import org.apache.pulsar.tests.integration.topologies.PulsarCluster; import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec; +import org.awaitility.Awaitility; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; @@ -126,6 +128,26 @@ public void startBroker() { brokerContainer.start(); } }); + String topicName = "persistent://" + DEFAULT_NAMESPACE + "/startBrokerCheck"; + Awaitility.await().atMost(120, TimeUnit.SECONDS).ignoreExceptions().until( + () -> { + for (BrokerContainer brokerContainer : pulsarCluster.getBrokers()) { + try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl( + brokerContainer.getHttpServiceUrl()).build()) { + if (admin.brokers().getActiveBrokers(clusterName).size() != NUM_BROKERS) { + return false; + } + try { + admin.topics().createPartitionedTopic(topicName, 10); + } catch (PulsarAdminException.ConflictException e) { + // expected + } + admin.lookups().lookupPartitionedTopic(topicName); + } + } 
+ return true; + } + ); } } @@ -242,7 +264,7 @@ public void testDeleteNamespace() throws Exception { assertFalse(admin.namespaces().getNamespaces(DEFAULT_TENANT).contains(namespace)); } - @Test(timeOut = 40 * 1000) + @Test(timeOut = 120 * 1000) public void testStopBroker() throws Exception { String topicName = "persistent://" + DEFAULT_NAMESPACE + "/test-stop-broker-topic"; @@ -258,9 +280,11 @@ public void testStopBroker() throws Exception { } } - String broker1 = admin.lookups().lookupTopic(topicName); + Awaitility.waitAtMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(() -> { + String broker1 = admin.lookups().lookupTopic(topicName); + assertNotEquals(broker1, broker); + }); - assertNotEquals(broker1, broker); } @Test(timeOut = 40 * 1000) @@ -300,33 +324,52 @@ public void testAntiaffinityPolicy() throws PulsarAdminException { assertEquals(result.size(), NUM_BROKERS); } - @Test(timeOut = 40 * 1000) - public void testIsolationPolicy() throws PulsarAdminException { + @Test(timeOut = 300 * 1000) + public void testIsolationPolicy() throws Exception { final String namespaceIsolationPolicyName = "my-isolation-policy"; final String isolationEnabledNameSpace = DEFAULT_TENANT + "/my-isolation-policy" + nsSuffix; Map parameters1 = new HashMap<>(); parameters1.put("min_limit", "1"); parameters1.put("usage_threshold", "100"); - List activeBrokers = admin.brokers().getActiveBrokers(); + Awaitility.await().atMost(10, TimeUnit.SECONDS).ignoreExceptions().untilAsserted( + () -> { + List activeBrokers = admin.brokers().getActiveBrokersAsync() + .get(5, TimeUnit.SECONDS); + assertEquals(activeBrokers.size(), NUM_BROKERS); + } + ); + try { + admin.namespaces().createNamespace(isolationEnabledNameSpace); + } catch (PulsarAdminException.ConflictException e) { + //expected when retried + } - assertEquals(activeBrokers.size(), NUM_BROKERS); + try { + admin.clusters() + .createNamespaceIsolationPolicy(clusterName, namespaceIsolationPolicyName, NamespaceIsolationData + 
.builder() + .namespaces(List.of(isolationEnabledNameSpace)) + .autoFailoverPolicy(AutoFailoverPolicyData.builder() + .policyType(AutoFailoverPolicyType.min_available) + .parameters(parameters1) + .build()) + .primary(List.of(getHostName(0))) + .secondary(List.of(getHostName(1))) + .build()); + } catch (PulsarAdminException.ConflictException e) { + //expected when retried + } - admin.namespaces().createNamespace(isolationEnabledNameSpace); - admin.clusters().createNamespaceIsolationPolicy(clusterName, namespaceIsolationPolicyName, NamespaceIsolationData - .builder() - .namespaces(List.of(isolationEnabledNameSpace)) - .autoFailoverPolicy(AutoFailoverPolicyData.builder() - .policyType(AutoFailoverPolicyType.min_available) - .parameters(parameters1) - .build()) - .primary(List.of(getHostName(0))) - .secondary(List.of(getHostName(1))) - .build()); final String topic = "persistent://" + isolationEnabledNameSpace + "/topic"; - admin.topics().createNonPartitionedTopic(topic); + try { + admin.topics().createNonPartitionedTopic(topic); + } catch (PulsarAdminException.ConflictException e) { + //expected when retried + } String broker = admin.lookups().lookupTopic(topic); + assertEquals(extractBrokerIndex(broker), 0); for (BrokerContainer container : pulsarCluster.getBrokers()) { String name = container.getHostName(); @@ -335,11 +378,18 @@ public void testIsolationPolicy() throws PulsarAdminException { } } - assertEquals(extractBrokerIndex(broker), 0); - - broker = admin.lookups().lookupTopic(topic); + Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted( + () -> { + List activeBrokers = admin.brokers().getActiveBrokersAsync() + .get(5, TimeUnit.SECONDS); + assertEquals(activeBrokers.size(), 2); + } + ); - assertEquals(extractBrokerIndex(broker), 1); + Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted(() -> { + String ownerBroker = admin.lookups().lookupTopicAsync(topic).get(5, TimeUnit.SECONDS); + 
assertEquals(extractBrokerIndex(ownerBroker), 1); + }); for (BrokerContainer container : pulsarCluster.getBrokers()) { String name = container.getHostName(); @@ -347,14 +397,24 @@ public void testIsolationPolicy() throws PulsarAdminException { container.stop(); } } - try { - admin.lookups().lookupTopic(topic); - fail(); - } catch (Exception ex) { - log.error("Failed to lookup topic: ", ex); - assertThat(ex.getMessage()).containsAnyOf("Failed to look up a broker", - "Failed to select the new owner broker for bundle"); - } + + Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted( + () -> { + List activeBrokers = admin.brokers().getActiveBrokersAsync().get(5, TimeUnit.SECONDS); + assertEquals(activeBrokers.size(), 1); + } + ); + + Awaitility.await().atMost(60, TimeUnit.SECONDS).ignoreExceptions().untilAsserted( + () -> { + try { + admin.lookups().lookupTopicAsync(topic).get(5, TimeUnit.SECONDS); + } catch (Exception ex) { + log.error("Failed to lookup topic: ", ex); + assertThat(ex.getMessage()).contains("Failed to select the new owner broker for bundle"); + } + } + ); } private String getBrokerUrl(int index) { diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java index 0a9bb5e19592a..87db46f2bb625 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLAuth.java @@ -68,6 +68,7 @@ protected void beforeStartCluster() { envMap.put("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(secretKey)); envMap.put("superUserRoles", "admin"); envMap.put("brokerDeleteInactiveTopicsEnabled", "false"); + envMap.put("topicLevelPoliciesEnabled", "false"); for (BrokerContainer brokerContainer : pulsarCluster.getBrokers()) { brokerContainer.withEnv(envMap); diff 
--git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java index 8bb6de74c661d..d0421063b2d90 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/schema/SchemaTest.java @@ -31,6 +31,8 @@ import org.apache.pulsar.client.api.schema.SchemaDefinition; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.schema.SchemaInfo; +import org.apache.pulsar.common.schema.SchemaType; import org.apache.pulsar.tests.integration.schema.Schemas.Person; import org.apache.pulsar.tests.integration.schema.Schemas.PersonConsumeSchema; import org.apache.pulsar.tests.integration.schema.Schemas.Student; @@ -316,5 +318,14 @@ public void testPrimitiveSchemaTypeCompatibilityCheck() { } + @Test + public void testDeletePartitionedTopicWhenTopicReferenceIsNotReady() throws Exception { + final String topic = "persistent://public/default/tp-ref"; + admin.topics().createPartitionedTopic(topic, 20); + admin.schemas().createSchema(topic, + SchemaInfo.builder().type(SchemaType.STRING).schema(new byte[0]).build()); + admin.topics().deletePartitionedTopic(topic, false); + } + } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java index 59ff978cafa06..080912cd49262 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/tls/ClientTlsTest.java @@ -29,6 +29,7 @@ import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import 
org.apache.pulsar.tests.integration.suites.PulsarTestSuite; +import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -41,6 +42,14 @@ private static String loadCertificateAuthorityFile(String name) { return Resources.getResource("certificate-authority/" + name).getPath(); } + @Override + protected PulsarClusterSpec.PulsarClusterSpecBuilder beforeSetupCluster( + String clusterName, + PulsarClusterSpec.PulsarClusterSpecBuilder specBuilder) { + specBuilder.enableTls(true); + return specBuilder; + } + @DataProvider(name = "adminUrls") public Object[][] adminUrls() { return new Object[][]{ diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java index 9b4823f46d4cc..e6a425956cf0b 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java @@ -38,6 +38,7 @@ import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.commons.io.IOUtils; +import org.apache.pulsar.client.impl.auth.AuthenticationTls; import org.apache.pulsar.tests.integration.containers.BKContainer; import org.apache.pulsar.tests.integration.containers.BrokerContainer; import org.apache.pulsar.tests.integration.containers.CSContainer; @@ -132,14 +133,16 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s this.brokerContainers = Maps.newTreeMap(); this.workerContainers = Maps.newTreeMap(); - this.proxyContainer = new ProxyContainer(appendClusterName("pulsar-proxy"), ProxyContainer.NAME) + this.proxyContainer = new ProxyContainer(appendClusterName("pulsar-proxy"), ProxyContainer.NAME, spec.enableTls) .withNetwork(network) 
.withNetworkAliases(appendClusterName("pulsar-proxy")) .withEnv("zkServers", appendClusterName(ZKContainer.NAME)) .withEnv("zookeeperServers", appendClusterName(ZKContainer.NAME)) .withEnv("configurationStoreServers", CSContainer.NAME + ":" + CS_PORT) - .withEnv("clusterName", clusterName) + .withEnv("clusterName", clusterName); // enable mTLS + if (spec.enableTls) { + proxyContainer .withEnv("webServicePortTls", String.valueOf(BROKER_HTTPS_PORT)) .withEnv("servicePortTls", String.valueOf(BROKER_PORT_TLS)) .withEnv("forwardAuthorizationCredentials", "true") @@ -147,7 +150,15 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s .withEnv("tlsAllowInsecureConnection", "false") .withEnv("tlsCertificateFilePath", "/pulsar/certificate-authority/server-keys/proxy.cert.pem") .withEnv("tlsKeyFilePath", "/pulsar/certificate-authority/server-keys/proxy.key-pk8.pem") - .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem"); + .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem") + .withEnv("brokerClientAuthenticationPlugin", AuthenticationTls.class.getName()) + .withEnv("brokerClientAuthenticationParameters", String.format("tlsCertFile:%s,tlsKeyFile:%s", "/pulsar/certificate-authority/client-keys/admin.cert.pem", "/pulsar/certificate-authority/client-keys/admin.key-pk8.pem")) + .withEnv("tlsEnabledWithBroker", "true") + .withEnv("brokerClientTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem") + .withEnv("brokerClientCertificateFilePath", "/pulsar/certificate-authority/server-keys/proxy.cert.pem") + .withEnv("brokerClientKeyFilePath", "/pulsar/certificate-authority/server-keys/proxy.key-pk8.pem"); + + } if (spec.proxyEnvs != null) { spec.proxyEnvs.forEach(this.proxyContainer::withEnv); } @@ -184,7 +195,7 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s // create brokers brokerContainers.putAll( runNumContainers("broker", 
spec.numBrokers(), (name) -> { - BrokerContainer brokerContainer = new BrokerContainer(clusterName, appendClusterName(name)) + BrokerContainer brokerContainer = new BrokerContainer(clusterName, appendClusterName(name), spec.enableTls) .withNetwork(network) .withNetworkAliases(appendClusterName(name)) .withEnv("zkServers", appendClusterName(ZKContainer.NAME)) @@ -195,16 +206,19 @@ private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean s .withEnv("loadBalancerOverrideBrokerNicSpeedGbps", "1") // used in s3 tests .withEnv("AWS_ACCESS_KEY_ID", "accesskey").withEnv("AWS_SECRET_KEY", "secretkey") - .withEnv("maxMessageSize", "" + spec.maxMessageSize) + .withEnv("maxMessageSize", "" + spec.maxMessageSize); + if (spec.enableTls) { // enable mTLS - .withEnv("webServicePortTls", String.valueOf(BROKER_HTTPS_PORT)) - .withEnv("brokerServicePortTls", String.valueOf(BROKER_PORT_TLS)) - .withEnv("authenticateOriginalAuthData", "true") - .withEnv("tlsRequireTrustedClientCertOnConnect", "true") - .withEnv("tlsAllowInsecureConnection", "false") - .withEnv("tlsCertificateFilePath", "/pulsar/certificate-authority/server-keys/broker.cert.pem") - .withEnv("tlsKeyFilePath", "/pulsar/certificate-authority/server-keys/broker.key-pk8.pem") - .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem"); + brokerContainer + .withEnv("webServicePortTls", String.valueOf(BROKER_HTTPS_PORT)) + .withEnv("brokerServicePortTls", String.valueOf(BROKER_PORT_TLS)) + .withEnv("authenticateOriginalAuthData", "true") + .withEnv("tlsAllowInsecureConnection", "false") + .withEnv("tlsRequireTrustedClientCertOnConnect", "true") + .withEnv("tlsTrustCertsFilePath", "/pulsar/certificate-authority/certs/ca.cert.pem") + .withEnv("tlsCertificateFilePath", "/pulsar/certificate-authority/server-keys/broker.cert.pem") + .withEnv("tlsKeyFilePath", "/pulsar/certificate-authority/server-keys/broker.key-pk8.pem"); + } if (spec.queryLastMessage) { 
brokerContainer.withEnv("bookkeeperExplicitLacIntervalInMills", "10"); brokerContainer.withEnv("bookkeeperUseV2WireProtocol", "false"); @@ -511,37 +525,18 @@ public synchronized void setupFunctionWorkers(String suffix, FunctionRuntimeType } private void startFunctionWorkersWithProcessContainerFactory(String suffix, int numFunctionWorkers) { - String serviceUrl = "pulsar://pulsar-broker-0:" + PulsarContainer.BROKER_PORT; - String httpServiceUrl = "http://pulsar-broker-0:" + PulsarContainer.BROKER_HTTP_PORT; workerContainers.putAll(runNumContainers( "functions-worker-process-" + suffix, numFunctionWorkers, - (name) -> new WorkerContainer(clusterName, name) - .withNetwork(network) - .withNetworkAliases(name) - // worker settings - .withEnv("PF_workerId", name) - .withEnv("PF_workerHostname", name) - .withEnv("PF_workerPort", "" + PulsarContainer.BROKER_HTTP_PORT) - .withEnv("PF_pulsarFunctionsCluster", clusterName) - .withEnv("PF_pulsarServiceUrl", serviceUrl) - .withEnv("PF_pulsarWebServiceUrl", httpServiceUrl) - // script - .withEnv("clusterName", clusterName) - .withEnv("zookeeperServers", ZKContainer.NAME) - // bookkeeper tools - .withEnv("zkServers", ZKContainer.NAME) + (name) -> createWorkerContainer(name) )); this.startWorkers(); } - private void startFunctionWorkersWithThreadContainerFactory(String suffix, int numFunctionWorkers) { + private WorkerContainer createWorkerContainer(String name) { String serviceUrl = "pulsar://pulsar-broker-0:" + PulsarContainer.BROKER_PORT; String httpServiceUrl = "http://pulsar-broker-0:" + PulsarContainer.BROKER_HTTP_PORT; - workerContainers.putAll(runNumContainers( - "functions-worker-thread-" + suffix, - numFunctionWorkers, - (name) -> new WorkerContainer(clusterName, name) + return new WorkerContainer(clusterName, name) .withNetwork(network) .withNetworkAliases(name) // worker settings @@ -551,13 +546,21 @@ private void startFunctionWorkersWithThreadContainerFactory(String suffix, int n .withEnv("PF_pulsarFunctionsCluster", 
clusterName) .withEnv("PF_pulsarServiceUrl", serviceUrl) .withEnv("PF_pulsarWebServiceUrl", httpServiceUrl) - .withEnv("PF_functionRuntimeFactoryClassName", "org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory") - .withEnv("PF_functionRuntimeFactoryConfigs_threadGroupName", "pf-container-group") // script .withEnv("clusterName", clusterName) .withEnv("zookeeperServers", ZKContainer.NAME) // bookkeeper tools - .withEnv("zkServers", ZKContainer.NAME) + .withEnv("zkServers", ZKContainer.NAME); + } + + private void startFunctionWorkersWithThreadContainerFactory(String suffix, int numFunctionWorkers) { + workerContainers.putAll(runNumContainers( + "functions-worker-thread-" + suffix, + numFunctionWorkers, + (name) -> createWorkerContainer(name) + .withEnv("PF_functionRuntimeFactoryClassName", + "org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory") + .withEnv("PF_functionRuntimeFactoryConfigs_threadGroupName", "pf-container-group") )); this.startWorkers(); } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java index fa28d20e6b356..c141e990d62e0 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java @@ -177,4 +177,10 @@ public class PulsarClusterSpec { * Additional ports to expose on bookie containers. */ List bookieAdditionalPorts; + + /** + * Enable TLS for connection. 
+ */ + @Default + boolean enableTls = false; } diff --git a/tests/integration/src/test/resources/pulsar-auth.xml b/tests/integration/src/test/resources/pulsar-auth.xml index 81d2c1361a269..3647730dee201 100644 --- a/tests/integration/src/test/resources/pulsar-auth.xml +++ b/tests/integration/src/test/resources/pulsar-auth.xml @@ -1,3 +1,4 @@ + - + - - + + - + diff --git a/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml b/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml index b50af74ed17f0..b0f55f3e04b16 100644 --- a/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml +++ b/tests/integration/src/test/resources/pulsar-backwards-compatibility.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - - - - - - \ No newline at end of file + + + + + + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-cli.xml b/tests/integration/src/test/resources/pulsar-cli.xml index af55aca8a0098..1243dc7e485d7 100644 --- a/tests/integration/src/test/resources/pulsar-cli.xml +++ b/tests/integration/src/test/resources/pulsar-cli.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-function.xml b/tests/integration/src/test/resources/pulsar-function.xml index a18a65ff2e16d..03a8d83997a3c 100644 --- a/tests/integration/src/test/resources/pulsar-function.xml +++ b/tests/integration/src/test/resources/pulsar-function.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-io-ora-source.xml b/tests/integration/src/test/resources/pulsar-io-ora-source.xml index 1c5bb5faf677c..0a9927015be86 100644 --- a/tests/integration/src/test/resources/pulsar-io-ora-source.xml +++ b/tests/integration/src/test/resources/pulsar-io-ora-source.xml @@ -1,3 +1,4 @@ + - + - - - - - + + + + + diff --git a/tests/integration/src/test/resources/pulsar-io-sinks.xml 
b/tests/integration/src/test/resources/pulsar-io-sinks.xml index 614c371f3e52c..046f977f3e13b 100644 --- a/tests/integration/src/test/resources/pulsar-io-sinks.xml +++ b/tests/integration/src/test/resources/pulsar-io-sinks.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-io-sources.xml b/tests/integration/src/test/resources/pulsar-io-sources.xml index 636b3e479195f..c995a7df249dd 100644 --- a/tests/integration/src/test/resources/pulsar-io-sources.xml +++ b/tests/integration/src/test/resources/pulsar-io-sources.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - \ No newline at end of file + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-loadbalance.xml b/tests/integration/src/test/resources/pulsar-loadbalance.xml index dfc4536e25592..d28dd1e0da7ee 100644 --- a/tests/integration/src/test/resources/pulsar-loadbalance.xml +++ b/tests/integration/src/test/resources/pulsar-loadbalance.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-messaging.xml b/tests/integration/src/test/resources/pulsar-messaging.xml index cfbdb22587034..f8813b614f5cb 100644 --- a/tests/integration/src/test/resources/pulsar-messaging.xml +++ b/tests/integration/src/test/resources/pulsar-messaging.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - - - - - \ No newline at end of file + + + + + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-plugin.xml b/tests/integration/src/test/resources/pulsar-plugin.xml index f88b67256e547..06fa8ee628c0f 100644 --- a/tests/integration/src/test/resources/pulsar-plugin.xml +++ b/tests/integration/src/test/resources/pulsar-plugin.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-process.xml b/tests/integration/src/test/resources/pulsar-process.xml index 8e5258d30624c..a6f1ac468d954 100644 --- 
a/tests/integration/src/test/resources/pulsar-process.xml +++ b/tests/integration/src/test/resources/pulsar-process.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - - + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-proxy.xml b/tests/integration/src/test/resources/pulsar-proxy.xml index ae6f13810535c..fabfb54d781a3 100644 --- a/tests/integration/src/test/resources/pulsar-proxy.xml +++ b/tests/integration/src/test/resources/pulsar-proxy.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-python.xml b/tests/integration/src/test/resources/pulsar-python.xml index a5faa6389e0f1..365768083d21b 100644 --- a/tests/integration/src/test/resources/pulsar-python.xml +++ b/tests/integration/src/test/resources/pulsar-python.xml @@ -1,3 +1,4 @@ + - + - - - - - + + + + + diff --git a/tests/integration/src/test/resources/pulsar-schema.xml b/tests/integration/src/test/resources/pulsar-schema.xml index e07fdf2b2d86f..595bb8fda5521 100644 --- a/tests/integration/src/test/resources/pulsar-schema.xml +++ b/tests/integration/src/test/resources/pulsar-schema.xml @@ -1,3 +1,4 @@ + - + - - - - - - - + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-semantics.xml b/tests/integration/src/test/resources/pulsar-semantics.xml index 5b5402af4623b..69e171931f0e4 100644 --- a/tests/integration/src/test/resources/pulsar-semantics.xml +++ b/tests/integration/src/test/resources/pulsar-semantics.xml @@ -1,3 +1,4 @@ + - + - - - - - + + + + + diff --git a/tests/integration/src/test/resources/pulsar-sql.xml b/tests/integration/src/test/resources/pulsar-sql.xml index 1ab4d479ad041..a7e0995f1f2d0 100644 --- a/tests/integration/src/test/resources/pulsar-sql.xml +++ b/tests/integration/src/test/resources/pulsar-sql.xml @@ -1,3 +1,4 @@ + - + - - - - - - - + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-standalone.xml 
b/tests/integration/src/test/resources/pulsar-standalone.xml index d8892c0746181..45d79a72821b5 100644 --- a/tests/integration/src/test/resources/pulsar-standalone.xml +++ b/tests/integration/src/test/resources/pulsar-standalone.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-thread.xml b/tests/integration/src/test/resources/pulsar-thread.xml index cf3da15e8e1ad..dc1b8e3b3de15 100644 --- a/tests/integration/src/test/resources/pulsar-thread.xml +++ b/tests/integration/src/test/resources/pulsar-thread.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - - - \ No newline at end of file + + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-tls.xml b/tests/integration/src/test/resources/pulsar-tls.xml index 153d14b62a725..88e90c43107fb 100644 --- a/tests/integration/src/test/resources/pulsar-tls.xml +++ b/tests/integration/src/test/resources/pulsar-tls.xml @@ -1,3 +1,4 @@ + - + - - - - - + + + + + diff --git a/tests/integration/src/test/resources/pulsar-transaction.xml b/tests/integration/src/test/resources/pulsar-transaction.xml index 72c375d000d8e..425f58ebda639 100644 --- a/tests/integration/src/test/resources/pulsar-transaction.xml +++ b/tests/integration/src/test/resources/pulsar-transaction.xml @@ -1,3 +1,4 @@ + - + - - - - - + + + + + diff --git a/tests/integration/src/test/resources/pulsar-upgrade.xml b/tests/integration/src/test/resources/pulsar-upgrade.xml index a52db54753372..cb312b04ed7a3 100644 --- a/tests/integration/src/test/resources/pulsar-upgrade.xml +++ b/tests/integration/src/test/resources/pulsar-upgrade.xml @@ -1,3 +1,4 @@ + - + - - - - - + + + + + diff --git a/tests/integration/src/test/resources/pulsar-websocket.xml b/tests/integration/src/test/resources/pulsar-websocket.xml index 87bf832d4e40a..e5023451518e0 100644 --- a/tests/integration/src/test/resources/pulsar-websocket.xml +++ b/tests/integration/src/test/resources/pulsar-websocket.xml @@ -1,3 
+1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/integration/src/test/resources/pulsar.xml b/tests/integration/src/test/resources/pulsar.xml index d309a7695dd0b..ada31c2e5da8c 100644 --- a/tests/integration/src/test/resources/pulsar.xml +++ b/tests/integration/src/test/resources/pulsar.xml @@ -1,3 +1,4 @@ + - + - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + diff --git a/tests/integration/src/test/resources/tiered-filesystem-storage.xml b/tests/integration/src/test/resources/tiered-filesystem-storage.xml index b14077594e0c9..c8cb694ea2d02 100644 --- a/tests/integration/src/test/resources/tiered-filesystem-storage.xml +++ b/tests/integration/src/test/resources/tiered-filesystem-storage.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/integration/src/test/resources/tiered-jcloud-storage.xml b/tests/integration/src/test/resources/tiered-jcloud-storage.xml index f61f9f5fa1612..be699ba0a8680 100644 --- a/tests/integration/src/test/resources/tiered-jcloud-storage.xml +++ b/tests/integration/src/test/resources/tiered-jcloud-storage.xml @@ -1,3 +1,4 @@ + - + - - - - - - \ No newline at end of file + + + + + + diff --git a/tests/pom.xml b/tests/pom.xml index ecdff86a8b1b2..f03dcc35b4844 100644 --- a/tests/pom.xml +++ b/tests/pom.xml @@ -1,4 +1,4 @@ - + - + pom 4.0.0 - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT - org.apache.pulsar.tests + io.streamnative.tests tests-parent Apache Pulsar :: Tests @@ -50,7 +49,7 @@ skipIntegrationTests - + integrationTests diff --git a/tests/pulsar-client-admin-shade-test/pom.xml b/tests/pulsar-client-admin-shade-test/pom.xml index b0019d01b04ee..0cb5af8dcebee 100644 --- a/tests/pulsar-client-admin-shade-test/pom.xml +++ b/tests/pulsar-client-admin-shade-test/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - org.apache.pulsar.tests - tests-parent - 3.1.0-SNAPSHOT - - - pulsar-client-admin-shade-test - jar - Apache Pulsar :: 
Tests :: Pulsar-Client-Admin-Shade Test - - - - - org.apache.pulsar - pulsar-client-admin - ${project.version} - test - - - - org.apache.pulsar - pulsar-client-admin-api - ${project.version} - test - - - - org.testcontainers - testcontainers - test - - - - org.apache.pulsar - pulsar-client-messagecrypto-bc - ${project.version} - test - - - - - + + 4.0.0 + + io.streamnative.tests + tests-parent + 3.1.0-SNAPSHOT + + pulsar-client-admin-shade-test + jar + Apache Pulsar :: Tests :: Pulsar-Client-Admin-Shade Test + + + io.streamnative + pulsar-client-admin + ${project.version} + test + + + io.streamnative + pulsar-client-admin-api + ${project.version} + test + + + org.testcontainers + testcontainers + test + + + io.streamnative + pulsar-client-messagecrypto-bc + ${project.version} + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + true + + ${project.version} + ${project.build.directory} + + + + + + + + ShadeTests + + + ShadeTests + + + - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - - true - - ${project.version} - ${project.build.directory} - - - - - - - - - ShadeTests - - - ShadeTests - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G + + org.apache.maven.plugins + maven-surefire-plugin + + ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G -Dio.netty.leakDetectionLevel=advanced ${test.additional.args} - false - - src/test/resources/pulsar.xml - - 1 - - - - - - + false + + src/test/resources/pulsar.xml + + 1 + + + + + + diff --git a/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml b/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml index d95dd95ae1775..2476755a95f7e 100644 --- 
a/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml +++ b/tests/pulsar-client-admin-shade-test/src/test/resources/pulsar.xml @@ -1,3 +1,4 @@ + - + - - - - - - + + + + + + diff --git a/tests/pulsar-client-all-shade-test/pom.xml b/tests/pulsar-client-all-shade-test/pom.xml index db11fec205a93..775e291cb0ae6 100644 --- a/tests/pulsar-client-all-shade-test/pom.xml +++ b/tests/pulsar-client-all-shade-test/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - org.apache.pulsar.tests - tests-parent - 3.1.0-SNAPSHOT - - - pulsar-client-all-shade-test - jar - Apache Pulsar :: Tests :: Pulsar-Client-All-Shade Test - - - - - org.apache.pulsar - pulsar-client-all - ${project.version} - test - - - - org.testcontainers - testcontainers - test - - - - org.apache.pulsar - bouncy-castle-bc - ${project.version} - pkg - - - - org.apache.pulsar - pulsar-client-messagecrypto-bc - ${project.version} - - - - - + + 4.0.0 + + io.streamnative.tests + tests-parent + 3.1.0-SNAPSHOT + + pulsar-client-all-shade-test + jar + Apache Pulsar :: Tests :: Pulsar-Client-All-Shade Test + + + io.streamnative + pulsar-client-all + ${project.version} + test + + + org.testcontainers + testcontainers + test + + + io.streamnative + bouncy-castle-bc + ${project.version} + pkg + + + io.streamnative + pulsar-client-messagecrypto-bc + ${project.version} + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + true + + ${project.version} + ${project.build.directory} + + + + + + + + ShadeTests + + + ShadeTests + + + - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - - true - - ${project.version} - ${project.build.directory} - - - - - - - - - ShadeTests - - - ShadeTests - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G + + org.apache.maven.plugins + 
maven-surefire-plugin + + ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G -Dio.netty.leakDetectionLevel=advanced ${test.additional.args} - false - - src/test/resources/pulsar.xml - - 1 - - - - - - + false + + src/test/resources/pulsar.xml + + 1 + + + + + + diff --git a/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml b/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml index 5c725f80eaea6..cfbc5ce59488b 100644 --- a/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml +++ b/tests/pulsar-client-all-shade-test/src/test/resources/pulsar.xml @@ -1,3 +1,4 @@ + - + - - - - - - + + + + + + diff --git a/tests/pulsar-client-shade-test/pom.xml b/tests/pulsar-client-shade-test/pom.xml index cf5aff0291fb7..e332e5d59107c 100644 --- a/tests/pulsar-client-shade-test/pom.xml +++ b/tests/pulsar-client-shade-test/pom.xml @@ -1,4 +1,4 @@ - + - - 4.0.0 - - - org.apache.pulsar.tests - tests-parent - 3.1.0-SNAPSHOT - - - pulsar-client-shade-test - jar - Apache Pulsar :: Tests :: Pulsar-Client-Shade Test - - - - - org.apache.pulsar - pulsar-client - ${project.version} - test - - - - org.apache.pulsar - pulsar-client-admin - ${project.version} - test - - - - org.testcontainers - testcontainers - test - - - - - + + 4.0.0 + + io.streamnative.tests + tests-parent + 3.1.0-SNAPSHOT + + pulsar-client-shade-test + jar + Apache Pulsar :: Tests :: Pulsar-Client-Shade Test + + + io.streamnative + pulsar-client + ${project.version} + test + + + io.streamnative + pulsar-client-admin + ${project.version} + test + + + org.testcontainers + testcontainers + test + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + true + + ${project.version} + ${project.build.directory} + + + + + + + + ShadeTests + + + ShadeTests + + + - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - 
maven-surefire-plugin - - - true - - ${project.version} - ${project.build.directory} - - - - - - - - - ShadeTests - - - ShadeTests - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G + + org.apache.maven.plugins + maven-surefire-plugin + + ${testJacocoAgentArgument} -XX:+ExitOnOutOfMemoryError -Xmx2G -XX:MaxDirectMemorySize=8G -Dio.netty.leakDetectionLevel=advanced ${test.additional.args} - false - - src/test/resources/pulsar.xml - - 1 - - - - - - + false + + src/test/resources/pulsar.xml + + 1 + + + + + + diff --git a/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml b/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml index 6132ad05f6ccf..e2ac6a9ee4f30 100644 --- a/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml +++ b/tests/pulsar-client-shade-test/src/test/resources/pulsar.xml @@ -1,3 +1,4 @@ + - + - - - - - - + + + + + + diff --git a/tiered-storage/file-system/pom.xml b/tiered-storage/file-system/pom.xml index 51986d17c3dbf..9d97a7e4dd548 100644 --- a/tiered-storage/file-system/pom.xml +++ b/tiered-storage/file-system/pom.xml @@ -1,3 +1,4 @@ + - - 4.0.0 - - - org.apache.pulsar - tiered-storage-parent - 3.1.0-SNAPSHOT - .. 
- - - tiered-storage-file-system - Apache Pulsar :: Tiered Storage :: File System - - - ${project.groupId} - managed-ledger - ${project.version} - - - org.apache.hadoop - hadoop-common - ${hdfs-offload-version3} - - - log4j - log4j - - - org.slf4j - slf4j-log4j12 - - - - - - org.apache.hadoop - hadoop-hdfs-client - ${hdfs-offload-version3} - - - org.apache.avro - avro - - - org.mortbay.jetty - jetty - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-server - - - javax.servlet - servlet-api - - - - - - org.apache.avro - avro - ${avro.version} - - - - net.minidev - json-smart - ${json-smart.version} - - - com.google.protobuf - protobuf-java - - - - ${project.groupId} - testmocks - ${project.version} - test - - - - org.apache.hadoop - hadoop-minicluster - ${hdfs-offload-version3} - test - - - io.netty - netty-all - - - org.bouncycastle - * - - - - - - org.bouncycastle - bcpkix-jdk18on - test - - - + + 4.0.0 + + io.streamnative + tiered-storage-parent + 3.1.0-SNAPSHOT + .. + + tiered-storage-file-system + Apache Pulsar :: Tiered Storage :: File System + + + ${project.groupId} + managed-ledger + ${project.version} + + + org.apache.hadoop + hadoop-common + ${hdfs-offload-version3} + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + + + + org.apache.hadoop + hadoop-hdfs-client + ${hdfs-offload-version3} + + + org.apache.avro + avro + + + org.mortbay.jetty + jetty + + + com.sun.jersey + jersey-core + + + com.sun.jersey + jersey-server + + + javax.servlet + servlet-api + + + + + + org.apache.avro + avro + ${avro.version} + + + + net.minidev + json-smart + ${json-smart.version} + + + com.google.protobuf + protobuf-java + + + ${project.groupId} + testmocks + ${project.version} + test + + + org.apache.hadoop + hadoop-minicluster + ${hdfs-offload-version3} + test + + io.netty - netty-codec-http - - - - org.eclipse.jetty - jetty-server - test - - - org.eclipse.jetty - jetty-alpn-conscrypt-server - test - - - org.eclipse.jetty - jetty-servlet - test - - - 
org.eclipse.jetty - jetty-util - test - - - - - - - org.apache.nifi - nifi-nar-maven-plugin - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - ${basedir}/src/main/resources/findbugsExclude.xml - - - - spotbugs - verify - - check - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - - checkstyle - verify - - check - - - - - - - - - - owasp-dependency-check - - - - org.owasp - dependency-check-maven - ${dependency-check-maven.version} - - - - aggregate - - none - - - - - - - + + owasp-dependency-check + + + + org.owasp + dependency-check-maven + ${dependency-check-maven.version} + + + + aggregate + + none + + + + + + + diff --git a/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java b/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java index 49b2071f5db2c..91e7e902eab8a 100644 --- a/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java +++ b/tiered-storage/file-system/src/main/java/org/apache/bookkeeper/mledger/offload/filesystem/impl/FileStoreBackedReadHandleImpl.java @@ -27,6 +27,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.api.LastConfirmedAndEntry; import org.apache.bookkeeper.client.api.LedgerEntries; @@ -36,6 +37,7 @@ import org.apache.bookkeeper.client.impl.LedgerEntriesImpl; import org.apache.bookkeeper.client.impl.LedgerEntryImpl; import org.apache.bookkeeper.mledger.LedgerOffloaderStats; +import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.LongWritable; import 
org.apache.hadoop.io.MapFile; @@ -53,6 +55,12 @@ public class FileStoreBackedReadHandleImpl implements ReadHandle { private final LedgerOffloaderStats offloaderStats; private final String managedLedgerName; private final String topicName; + enum State { + Opened, + Closed + } + private volatile State state; + private final AtomicReference> closeFuture = new AtomicReference<>(); private FileStoreBackedReadHandleImpl(ExecutorService executor, MapFile.Reader reader, long ledgerId, LedgerOffloaderStats offloaderStats, @@ -72,6 +80,7 @@ private FileStoreBackedReadHandleImpl(ExecutorService executor, MapFile.Reader r offloaderStats.recordReadOffloadIndexLatency(topicName, System.nanoTime() - startReadIndexTime, TimeUnit.NANOSECONDS); this.ledgerMetadata = parseLedgerMetadata(ledgerId, value.copyBytes()); + state = State.Opened; } catch (IOException e) { log.error("Fail to read LedgerMetadata for ledgerId {}", ledgerId); @@ -92,15 +101,20 @@ public LedgerMetadata getLedgerMetadata() { @Override public CompletableFuture closeAsync() { - CompletableFuture promise = new CompletableFuture<>(); + if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) { + return closeFuture.get(); + } + + CompletableFuture promise = closeFuture.get(); executor.execute(() -> { - try { - reader.close(); - promise.complete(null); - } catch (IOException t) { - promise.completeExceptionally(t); - } - }); + try { + reader.close(); + state = State.Closed; + promise.complete(null); + } catch (IOException t) { + promise.completeExceptionally(t); + } + }); return promise; } @@ -111,6 +125,12 @@ public CompletableFuture readAsync(long firstEntry, long lastEntr } CompletableFuture promise = new CompletableFuture<>(); executor.execute(() -> { + if (state == State.Closed) { + log.warn("Reading a closed read handler. 
Ledger ID: {}, Read range: {}-{}", + ledgerId, firstEntry, lastEntry); + promise.completeExceptionally(new ManagedLedgerException.OffloadReadHandleClosedException()); + return; + } if (firstEntry > lastEntry || firstEntry < 0 || lastEntry > getLastAddConfirmed()) { diff --git a/tiered-storage/file-system/src/main/resources/findbugsExclude.xml b/tiered-storage/file-system/src/main/resources/findbugsExclude.xml index 051f0e68e257a..1b8cc1ac799ca 100644 --- a/tiered-storage/file-system/src/main/resources/findbugsExclude.xml +++ b/tiered-storage/file-system/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 - - org.apache.pulsar + io.streamnative tiered-storage-parent 3.1.0-SNAPSHOT .. - tiered-storage-jcloud Apache Pulsar :: Tiered Storage :: JCloud - ${project.groupId} managed-ledger ${project.version} - ${project.groupId} jclouds-shaded @@ -74,7 +70,6 @@ - org.apache.jclouds jclouds-allblobstore @@ -85,12 +80,10 @@ com.amazonaws aws-java-sdk-core - com.amazonaws aws-java-sdk-sts - ${project.groupId} testmocks @@ -103,7 +96,6 @@ ${jclouds.version} provided - javax.xml.bind jaxb-api @@ -115,13 +107,11 @@ runtime - com.sun.activation javax.activation runtime - @@ -129,7 +119,6 @@ org.apache.nifi nifi-nar-maven-plugin - com.github.spotbugs spotbugs-maven-plugin diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java index 5a571bb208e34..5346be6a044c8 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger.offload.jcloud.impl; +import com.google.common.annotations.VisibleForTesting; import 
com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import io.netty.buffer.ByteBuf; @@ -30,6 +31,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.api.LastConfirmedAndEntry; import org.apache.bookkeeper.client.api.LedgerEntries; @@ -66,13 +68,14 @@ public class BlobStoreBackedReadHandleImpl implements ReadHandle { .newBuilder() .expireAfterAccess(CACHE_TTL_SECONDS, TimeUnit.SECONDS) .build(); + private final AtomicReference> closeFuture = new AtomicReference<>(); enum State { Opened, Closed } - private State state = null; + private volatile State state = null; private BlobStoreBackedReadHandleImpl(long ledgerId, OffloadIndexBlock index, BackedInputStream inputStream, ExecutorService executor) { @@ -96,18 +99,22 @@ public LedgerMetadata getLedgerMetadata() { @Override public CompletableFuture closeAsync() { - CompletableFuture promise = new CompletableFuture<>(); + if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) { + return closeFuture.get(); + } + + CompletableFuture promise = closeFuture.get(); executor.execute(() -> { - try { - index.close(); - inputStream.close(); - entryOffsets.invalidateAll(); - state = State.Closed; - promise.complete(null); - } catch (IOException t) { - promise.completeExceptionally(t); - } - }); + try { + index.close(); + inputStream.close(); + entryOffsets.invalidateAll(); + state = State.Closed; + promise.complete(null); + } catch (IOException t) { + promise.completeExceptionally(t); + } + }); return promise; } @@ -298,6 +305,7 @@ public static ReadHandle open(ScheduledExecutorService executor, } // for testing + @VisibleForTesting State getState() { return this.state; } diff --git 
a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java index e40a0a3834c85..53d96e08abf5e 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java @@ -30,6 +30,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import lombok.val; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.api.LastConfirmedAndEntry; @@ -60,7 +61,8 @@ public class BlobStoreBackedReadHandleImplV2 implements ReadHandle { private final List inputStreams; private final List dataStreams; private final ExecutorService executor; - private State state = null; + private volatile State state = null; + private final AtomicReference> closeFuture = new AtomicReference<>(); enum State { Opened, @@ -123,7 +125,11 @@ public LedgerMetadata getLedgerMetadata() { @Override public CompletableFuture closeAsync() { - CompletableFuture promise = new CompletableFuture<>(); + if (closeFuture.get() != null || !closeFuture.compareAndSet(null, new CompletableFuture<>())) { + return closeFuture.get(); + } + + CompletableFuture promise = closeFuture.get(); executor.execute(() -> { try { for (OffloadIndexBlockV2 indexBlock : indices) { @@ -143,7 +149,9 @@ public CompletableFuture closeAsync() { @Override public CompletableFuture readAsync(long firstEntry, long lastEntry) { - log.debug("Ledger {}: reading {} - {}", getId(), firstEntry, lastEntry); + if (log.isDebugEnabled()) { + log.debug("Ledger {}: reading {} - {}", getId(), firstEntry, lastEntry); + } 
CompletableFuture promise = new CompletableFuture<>(); executor.execute(() -> { if (state == State.Closed) { diff --git a/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml b/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml index a8a9b9aae925f..c6ad70823a5ac 100644 --- a/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml +++ b/tiered-storage/jcloud/src/main/resources/findbugsExclude.xml @@ -1,3 +1,4 @@ + - + 4.0.0 pom - org.apache.pulsar + io.streamnative pulsar 3.1.0-SNAPSHOT .. - tiered-storage-parent Apache Pulsar :: Tiered Storage :: Parent - ${project.version} - jcloud file-system