From 3e1cbf48dfe872dd1e8d4331a3f95cd29e64cc5b Mon Sep 17 00:00:00 2001
From: Zzm0809 <934230207@qq.com>
Date: Fri, 22 Dec 2023 14:41:20 +0800
Subject: [PATCH] [Refactor][connector] delete pulsar and phoenix connector module (#2716)

* Spotless Apply

* Spotless Apply

* delete some connector

* delete some connector

---------

Co-authored-by: Zzm0809
---
 .../dinky-connector-phoenix-1.14/pom.xml | 72 ---
 .../phoenix/JdbcConnectionOptions.java | 123 -----
 .../phoenix/JdbcExactlyOnceOptions.java | 178 -------
 .../phoenix/JdbcExecutionOptions.java | 108 -----
 .../phoenix/JdbcStatementBuilder.java | 40 --
 .../connector/phoenix/PhoenixInputFormat.java | 449 ------------------
 .../phoenix/dialect/AbstractDialect.java | 89 ----
 .../phoenix/dialect/JdbcDialect.java | 159 -------
 .../phoenix/dialect/JdbcDialects.java | 40 --
 .../phoenix/dialect/PhoenixDialect.java | 143 ------
 .../internal/AbstractJdbcOutputFormat.java | 72 ---
 .../internal/GenericJdbcSinkFunction.java | 69 ---
 .../internal/JdbcBatchingOutputFormat.java | 377 ---------------
 .../internal/TableJdbcUpsertOutputFormat.java | 195 --------
 .../connection/JdbcConnectionProvider.java | 69 ---
 .../PhoneixJdbcConnectionProvider.java | 157 ------
 .../converter/AbstractJdbcRowConverter.java | 256 ----------
 .../internal/converter/JdbcRowConverter.java | 50 --
 .../converter/PhoenixRowConverter.java | 41 --
 .../executor/InsertOrUpdateJdbcExecutor.java | 139 ------
 .../executor/JdbcBatchStatementExecutor.java | 57 ---
 .../executor/KeyedBatchStatementExecutor.java | 90 ----
 .../SimpleBatchStatementExecutor.java | 87 ----
 .../TableBufferReducedStatementExecutor.java | 110 -----
 .../TableBufferedStatementExecutor.java | 73 ---
 .../TableInsertOrUpdateStatementExecutor.java | 116 -----
 .../TableSimpleStatementExecutor.java | 76 ---
 .../internal/options/JdbcDmlOptions.java | 152 ------
 .../internal/options/JdbcInsertOptions.java | 54 ---
 .../internal/options/JdbcLookupOptions.java | 96 ----
 .../phoenix/internal/options/JdbcOptions.java | 243 ----------
 .../internal/options/JdbcReadOptions.java | 174 -------
 .../options/JdbcTypedQueryOptions.java | 50 --
 .../options/PhoenixJdbcExecutionOptions.java | 105 ----
 .../options/PhoenixJdbcLookupOptions.java | 89 ----
 .../internal/options/PhoenixJdbcOptions.java | 221 ---------
 .../options/PhoenixJdbcReadOptions.java | 157 ------
 .../JdbcGenericParameterValuesProvider.java | 45 --
 .../JdbcNumericBetweenParametersProvider.java | 119 -----
 .../split/JdbcParameterValuesProvider.java | 37 --
 .../FieldNamedPreparedStatement.java | 253 ----------
 .../FieldNamedPreparedStatementImpl.java | 244 ----------
 .../phoenix/statement/StatementFactory.java | 30 --
 .../table/PhoenixDynamicTableFactory.java | 328 -------------
 .../table/PhoenixDynamicTableSink.java | 121 -----
 .../table/PhoenixDynamicTableSource.java | 178 -------
 ...PhoenixJdbcDynamicOutputFormatBuilder.java | 234 ---------
 .../table/PhoenixJdbcRowDataInputFormat.java | 375 ---------------
 .../table/PhoenixJdbcSinkFunction.java | 45 --
 .../phoenix/table/PhoenixLookupFunction.java | 301 ------------
 .../table/PhoenixRowDataLookupFunction.java | 221 ---------
 .../phoenix/table/PhoenixTableSource.java | 269 -----------
 .../table/PhoenixTableSourceSinkFactory.java | 245 ----------
 .../phoenix/table/PhoenixUpsertTableSink.java | 227 ---------
 .../connector/phoenix/utils/JdbcTypeUtil.java | 130 -----
 .../connector/phoenix/utils/JdbcUtils.java | 253 ----------
 .../phoenix/utils/PhoenixJdbcValidator.java | 152 ------
 .../org.apache.flink.table.factories.Factory | 16 -
 ....apache.flink.table.factories.TableFactory | 16 -
 .../dinky-connector-pulsar-1.14/README.md | 272 -----
 .../dinky-connector-pulsar-1.14/pom.xml | 209 --------
 .../connector/pulsar/PulsarDynamicSink.java | 155 ------
 .../connector/pulsar/PulsarDynamicSource.java | 226 ---------
 .../pulsar/PulsarDynamicTableFactory.java | 233 ---------
 .../connector/pulsar/PulsarSinkFunction.java | 306 ------------
 .../pulsar/util/PulsarConnectionHolder.java | 87 ----
 .../pulsar/util/PulsarConnectorOptions.java | 133 ------
 .../util/PulsarConnectorOptionsUtil.java | 59 ---
 .../pulsar/util/PulsarProducerHolder.java | 91 ----
 .../org.apache.flink.table.factories.Factory | 16 -
 .../dinky/connector/pulsar/PulsarSqlCase.java | 137 ------
 .../org.apache.flink.table.factories.Factory | 16 -
 dinky-connectors/pom.xml | 9 -
 73 files changed, 10564 deletions(-)
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/pom.xml
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcConnectionOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExactlyOnceOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExecutionOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcStatementBuilder.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/PhoenixInputFormat.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/AbstractDialect.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialect.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialects.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/PhoenixDialect.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/AbstractJdbcOutputFormat.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/GenericJdbcSinkFunction.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/JdbcBatchingOutputFormat.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/TableJdbcUpsertOutputFormat.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/JdbcConnectionProvider.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/PhoneixJdbcConnectionProvider.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/AbstractJdbcRowConverter.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/JdbcRowConverter.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/PhoenixRowConverter.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/InsertOrUpdateJdbcExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/JdbcBatchStatementExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/KeyedBatchStatementExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/SimpleBatchStatementExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferReducedStatementExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferedStatementExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableInsertOrUpdateStatementExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableSimpleStatementExecutor.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcDmlOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcInsertOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcLookupOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcReadOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcTypedQueryOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcExecutionOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcLookupOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcReadOptions.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcGenericParameterValuesProvider.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcNumericBetweenParametersProvider.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcParameterValuesProvider.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatement.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatementImpl.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/StatementFactory.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableFactory.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSink.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSource.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcDynamicOutputFormatBuilder.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcRowDataInputFormat.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcSinkFunction.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixLookupFunction.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixRowDataLookupFunction.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSource.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSourceSinkFactory.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixUpsertTableSink.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcTypeUtil.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcUtils.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/PhoenixJdbcValidator.java
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
 delete mode 100644 dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
 delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/README.md
 delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/pom.xml
 delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSink.java
 delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSource.java
 delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicTableFactory.java
 delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarSinkFunction.java
 delete mode 100644
dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectionHolder.java delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptions.java delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptionsUtil.java delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarProducerHolder.java delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/test/java/org/dinky/connector/pulsar/PulsarSqlCase.java delete mode 100644 dinky-connectors/dinky-connector-pulsar-1.14/src/test/resources/META-INF/services/org.apache.flink.table.factories.Factory diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/pom.xml b/dinky-connectors/dinky-connector-phoenix-1.14/pom.xml deleted file mode 100644 index 320b9c9781..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/pom.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - - 4.0.0 - - org.dinky - dinky-connectors - ${revision} - ../pom.xml - - dinky-connector-phoenix-1.14 - - jar - - Dinky : Connector : Phoenix 1.14 - - - 4.12 - - provided - - - - - org.dinky - dinky-flink-1.14 - ${scope.runtime} - - - org.apache.phoenix - phoenix-core - 5.0.0-HBase-2.0 - ${scope.runtime} - - - org.glassfish - javax.el - - - - - - - - - org.apache.maven.plugins - maven-jar-plugin - ${maven-jar-plugin.version} - - - ${project.parent.parent.basedir}/build/extends - - - - - diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcConnectionOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcConnectionOptions.java deleted file mode 100644 index 1b396016b7..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcConnectionOptions.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix; - -import org.apache.flink.annotation.PublicEvolving; -import org.apache.flink.util.Preconditions; - -import java.io.Serializable; -import java.util.Optional; - -import javax.annotation.Nullable; - -/** JDBC connection options. 
*/ -@PublicEvolving -public class JdbcConnectionOptions implements Serializable { - - private static final long serialVersionUID = 1L; - - protected final String url; - - @Nullable - protected final String driverName; - - protected final int connectionCheckTimeoutSeconds; - - @Nullable - protected final String username; - - @Nullable - protected final String password; - - protected JdbcConnectionOptions( - String url, String driverName, String username, String password, int connectionCheckTimeoutSeconds) { - Preconditions.checkArgument(connectionCheckTimeoutSeconds > 0); - this.url = Preconditions.checkNotNull(url, "jdbc url is empty"); - this.driverName = driverName; - this.username = username; - this.password = password; - this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds; - } - - public String getDbURL() { - return url; - } - - @Nullable - public String getDriverName() { - return driverName; - } - - public Optional getUsername() { - return Optional.ofNullable(username); - } - - public Optional getPassword() { - return Optional.ofNullable(password); - } - - public int getConnectionCheckTimeoutSeconds() { - return connectionCheckTimeoutSeconds; - } - - /** Builder for {@link JdbcConnectionOptions}. */ - public static class JdbcConnectionOptionsBuilder { - private String url; - private String driverName; - private String username; - private String password; - private int connectionCheckTimeoutSeconds = 60; - - public JdbcConnectionOptionsBuilder withUrl(String url) { - this.url = url; - return this; - } - - public JdbcConnectionOptionsBuilder withDriverName(String driverName) { - this.driverName = driverName; - return this; - } - - public JdbcConnectionOptionsBuilder withUsername(String username) { - this.username = username; - return this; - } - - public JdbcConnectionOptionsBuilder withPassword(String password) { - this.password = password; - return this; - } - - /** - * Set the maximum timeout between retries, default is 60 seconds. - * - * @param connectionCheckTimeoutSeconds the timeout seconds, shouldn't smaller than 1 - * second. - */ - public JdbcConnectionOptionsBuilder withConnectionCheckTimeoutSeconds(int connectionCheckTimeoutSeconds) { - this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds; - return this; - } - - public JdbcConnectionOptions build() { - return new JdbcConnectionOptions(url, driverName, username, password, connectionCheckTimeoutSeconds); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExactlyOnceOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExactlyOnceOptions.java deleted file mode 100644 index 589e4df916..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExactlyOnceOptions.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix; - -import org.apache.flink.annotation.PublicEvolving; -import org.apache.flink.util.Preconditions; - -import java.io.Serializable; -import java.util.Optional; - -/** - * JDBC exactly once sink options. - * - *

maxCommitAttempts - maximum number of commit attempts to make per transaction; must be - * > 0; state size is proportional to the product of max number of in-flight snapshots and this - * number. - * - *

allowOutOfOrderCommits - If true, all prepared transactions will be attempted to commit - * regardless of any transient failures during this operation. This may lead to inconsistency. - * Default: false. - * - *

recoveredAndRollback - whether to rollback prepared transactions known to XA RM on - * startup (after committing known transactions, i.e. restored from state). - * - *

NOTE that setting this parameter to true may: - * - *

    - *
  1. interfere with other subtasks or applications (one subtask rolling back transactions - * prepared by the other one (and known to it)) - *
  2. block when using with some non-MVCC databases, if there are ended-not-prepared transactions - *
- * - *

See also {@link org.apache.flink.connector.jdbc.xa.XaFacade#recover()} - */ -@PublicEvolving -public class JdbcExactlyOnceOptions implements Serializable { - - private static final boolean DEFAULT_RECOVERED_AND_ROLLBACK = true; - private static final int DEFAULT_MAX_COMMIT_ATTEMPTS = 3; - private static final boolean DEFAULT_ALLOW_OUT_OF_ORDER_COMMITS = false; - public static final boolean DEFAULT_TRANSACTION_PER_CONNECTION = false; - - private final boolean discoverAndRollbackOnRecovery; - private final int maxCommitAttempts; - private final boolean allowOutOfOrderCommits; - private final Integer timeoutSec; - private final boolean transactionPerConnection; - - private JdbcExactlyOnceOptions( - boolean discoverAndRollbackOnRecovery, - int maxCommitAttempts, - boolean allowOutOfOrderCommits, - Optional timeoutSec, - boolean transactionPerConnection) { - this.discoverAndRollbackOnRecovery = discoverAndRollbackOnRecovery; - this.maxCommitAttempts = maxCommitAttempts; - this.allowOutOfOrderCommits = allowOutOfOrderCommits; - this.timeoutSec = timeoutSec.orElse(null); - this.transactionPerConnection = transactionPerConnection; - Preconditions.checkArgument(this.maxCommitAttempts > 0, "maxCommitAttempts should be > 0"); - } - - public static JdbcExactlyOnceOptions defaults() { - return builder().build(); - } - - public boolean isDiscoverAndRollbackOnRecovery() { - return discoverAndRollbackOnRecovery; - } - - public boolean isAllowOutOfOrderCommits() { - return allowOutOfOrderCommits; - } - - public int getMaxCommitAttempts() { - return maxCommitAttempts; - } - - public Integer getTimeoutSec() { - return timeoutSec; - } - - public boolean isTransactionPerConnection() { - return transactionPerConnection; - } - - public static JDBCExactlyOnceOptionsBuilder builder() { - return new JDBCExactlyOnceOptionsBuilder(); - } - - /** JDBCExactlyOnceOptionsBuilder. */ - public static class JDBCExactlyOnceOptionsBuilder { - private boolean recoveredAndRollback = DEFAULT_RECOVERED_AND_ROLLBACK; - private int maxCommitAttempts = DEFAULT_MAX_COMMIT_ATTEMPTS; - private boolean allowOutOfOrderCommits = DEFAULT_ALLOW_OUT_OF_ORDER_COMMITS; - private Optional timeoutSec = Optional.empty(); - private boolean transactionPerConnection = DEFAULT_TRANSACTION_PER_CONNECTION; - - /** - * Toggle discovery and rollback of prepared transactions upon recovery to prevent new - * transactions from being blocked by the older ones. Each subtask rollbacks its own - * transaction. This flag must be disabled when rescaling to prevent data loss. - */ - public JDBCExactlyOnceOptionsBuilder withRecoveredAndRollback(boolean recoveredAndRollback) { - this.recoveredAndRollback = recoveredAndRollback; - return this; - } - - /** - * Set the number of attempt to commit a transaction (takes effect only if transient failure - * happens). - */ - public JDBCExactlyOnceOptionsBuilder withMaxCommitAttempts(int maxCommitAttempts) { - this.maxCommitAttempts = maxCommitAttempts; - return this; - } - - /** - * Set whether transactions may be committed out-of-order in case of retries and this option - * is enabled. - */ - public JDBCExactlyOnceOptionsBuilder withAllowOutOfOrderCommits(boolean allowOutOfOrderCommits) { - this.allowOutOfOrderCommits = allowOutOfOrderCommits; - return this; - } - - /** Set transaction timeout in seconds (vendor-specific). 
*/ - public JDBCExactlyOnceOptionsBuilder withTimeoutSec(Optional timeoutSec) { - this.timeoutSec = timeoutSec; - return this; - } - - /** - * Set whether the same connection can be used for multiple XA transactions. A transaction - * is prepared each time a checkpoint is performed; it is committed once the checkpoint is - * confirmed. There can be multiple un-confirmed checkpoints and therefore multiple prepared - * transactions. - * - *

Some databases support this natively (e.g. Oracle); while others only allow a single - * XA transaction per connection (e.g. MySQL, PostgreSQL). - * - *

If enabled, each transaction uses a separate connection from a pool. The database - * limit of open connections might need to be adjusted. - * - *

Disabled by default. - */ - public JDBCExactlyOnceOptionsBuilder withTransactionPerConnection(boolean transactionPerConnection) { - this.transactionPerConnection = transactionPerConnection; - return this; - } - - public JdbcExactlyOnceOptions build() { - return new JdbcExactlyOnceOptions( - recoveredAndRollback, - maxCommitAttempts, - allowOutOfOrderCommits, - timeoutSec, - transactionPerConnection); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExecutionOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExecutionOptions.java deleted file mode 100644 index 70121e2ae4..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcExecutionOptions.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix; - -import org.apache.flink.annotation.PublicEvolving; -import org.apache.flink.util.Preconditions; - -import java.io.Serializable; -import java.util.Objects; - -/** JDBC sink batch options. */ -@PublicEvolving -public class JdbcExecutionOptions implements Serializable { - public static final int DEFAULT_MAX_RETRY_TIMES = 3; - private static final int DEFAULT_INTERVAL_MILLIS = 0; - public static final int DEFAULT_SIZE = 5000; - - private final long batchIntervalMs; - private final int batchSize; - private final int maxRetries; - - private JdbcExecutionOptions(long batchIntervalMs, int batchSize, int maxRetries) { - Preconditions.checkArgument(maxRetries >= 0); - this.batchIntervalMs = batchIntervalMs; - this.batchSize = batchSize; - this.maxRetries = maxRetries; - } - - public long getBatchIntervalMs() { - return batchIntervalMs; - } - - public int getBatchSize() { - return batchSize; - } - - public int getMaxRetries() { - return maxRetries; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - JdbcExecutionOptions that = (JdbcExecutionOptions) o; - return batchIntervalMs == that.batchIntervalMs && batchSize == that.batchSize && maxRetries == that.maxRetries; - } - - @Override - public int hashCode() { - return Objects.hash(batchIntervalMs, batchSize, maxRetries); - } - - public static Builder builder() { - return new Builder(); - } - - public static JdbcExecutionOptions defaults() { - return builder().build(); - } - - /** Builder for {@link JdbcExecutionOptions}. 
*/ - public static final class Builder { - private long intervalMs = DEFAULT_INTERVAL_MILLIS; - private int size = DEFAULT_SIZE; - private int maxRetries = DEFAULT_MAX_RETRY_TIMES; - - public Builder withBatchSize(int size) { - this.size = size; - return this; - } - - public Builder withBatchIntervalMs(long intervalMs) { - this.intervalMs = intervalMs; - return this; - } - - public Builder withMaxRetries(int maxRetries) { - this.maxRetries = maxRetries; - return this; - } - - public JdbcExecutionOptions build() { - return new JdbcExecutionOptions(intervalMs, size, maxRetries); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcStatementBuilder.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcStatementBuilder.java deleted file mode 100644 index 434a530cd1..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/JdbcStatementBuilder.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix; - -import org.apache.flink.annotation.PublicEvolving; -import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor; -import org.apache.flink.util.function.BiConsumerWithException; - -import java.io.Serializable; -import java.sql.PreparedStatement; -import java.sql.SQLException; - -/** - * Sets {@link PreparedStatement} parameters to use in JDBC Sink based on a specific type of - * StreamRecord. - * - * @param type of payload in {@link org.apache.flink.streaming.runtime.streamrecord.StreamRecord - * StreamRecord} - * @see JdbcBatchStatementExecutor - */ -@PublicEvolving -public interface JdbcStatementBuilder - extends BiConsumerWithException, Serializable {} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/PhoenixInputFormat.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/PhoenixInputFormat.java deleted file mode 100644 index ac31060003..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/PhoenixInputFormat.java +++ /dev/null @@ -1,449 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix; - -import org.apache.commons.lang.StringUtils; -import org.apache.flink.annotation.Experimental; -import org.apache.flink.annotation.VisibleForTesting; -import org.apache.flink.api.common.io.DefaultInputSplitAssigner; -import org.apache.flink.api.common.io.InputFormat; -import org.apache.flink.api.common.io.RichInputFormat; -import org.apache.flink.api.common.io.statistics.BaseStatistics; -import org.apache.flink.api.java.typeutils.ResultTypeQueryable; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider; -import org.apache.flink.connector.phoenix.split.JdbcParameterValuesProvider; -import org.apache.flink.core.io.GenericInputSplit; -import org.apache.flink.core.io.InputSplit; -import org.apache.flink.core.io.InputSplitAssigner; -import org.apache.flink.types.Row; -import org.apache.flink.util.Preconditions; - -import java.io.IOException; -import java.math.BigDecimal; -import java.sql.Array; -import java.sql.Connection; -import java.sql.Date; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Arrays; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * InputFormat to read data from a database and generate Rows. The InputFormat has to be configured - * using the supplied InputFormatBuilder. A valid RowTypeInfo must be properly configured in the - * builder, e.g.: - * - *


- * TypeInformation<?>[] fieldTypes = new TypeInformation<?>[] {
- *  BasicTypeInfo.INT_TYPE_INFO,
- *  BasicTypeInfo.STRING_TYPE_INFO,
- *  BasicTypeInfo.STRING_TYPE_INFO,
- *  BasicTypeInfo.DOUBLE_TYPE_INFO,
- *  BasicTypeInfo.INT_TYPE_INFO
- * };
- *
- * RowTypeInfo rowTypeInfo = new RowTypeInfo(fieldTypes);
- *
- * JdbcInputFormat jdbcInputFormat = JdbcInputFormat.buildJdbcInputFormat()
- *          .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
- *          .setDBUrl("jdbc:derby:memory:ebookshop")
- *          .setQuery("select * from books")
- *          .setRowTypeInfo(rowTypeInfo)
- *          .finish();
- * 
- * - *

In order to query the JDBC source in parallel, you need to provide a parameterized query - * template (i.e. a valid {@link PreparedStatement}) and a {@link JdbcParameterValuesProvider} which - * provides binding values for the query parameters. E.g.: - * - *


- *
- * Serializable[][] queryParameters = new String[2][1];
- * queryParameters[0] = new String[]{"Kumar"};
- * queryParameters[1] = new String[]{"Tan Ah Teck"};
- *
- * JdbcInputFormat jdbcInputFormat = JdbcInputFormat.buildJdbcInputFormat()
- *           .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
- *          .setDBUrl("jdbc:derby:memory:ebookshop")
- *          .setQuery("select * from books WHERE author = ?")
- *          .setRowTypeInfo(rowTypeInfo)
- *          .setParametersProvider(new JdbcGenericParameterValuesProvider(queryParameters))
- *          .finish();
- * 
- * - * @see Row - * @see JdbcParameterValuesProvider - * @see PreparedStatement - * @see DriverManager - */ -@Experimental -public class PhoenixInputFormat extends RichInputFormat implements ResultTypeQueryable { - - protected static final long serialVersionUID = 2L; - protected static final Logger LOG = LoggerFactory.getLogger(PhoenixInputFormat.class); - - protected JdbcConnectionProvider connectionProvider; - protected String queryTemplate; - protected int resultSetType; - protected int resultSetConcurrency; - protected RowTypeInfo rowTypeInfo; - - protected boolean namespaceMappingEnabled; - protected boolean mapSystemTablesEnabled; - - protected transient PreparedStatement statement; - protected transient ResultSet resultSet; - protected int fetchSize; - // Boolean to distinguish between default value and explicitly set autoCommit mode. - protected Boolean autoCommit; - - protected boolean hasNext; - protected Object[][] parameterValues; - - public PhoenixInputFormat() {} - - @Override - public RowTypeInfo getProducedType() { - return rowTypeInfo; - } - - @Override - public void configure(Configuration parameters) { - // do nothing here - } - - @Override - public void openInputFormat() { - // called once per inputFormat (on open) - try { - Connection dbConn = connectionProvider.getOrEstablishConnection(); - - // set autoCommit mode only if it was explicitly configured. - // keep connection default otherwise. - if (autoCommit != null) { - dbConn.setAutoCommit(autoCommit); - } - - LOG.debug("openInputFormat query :" + queryTemplate); - // 删除 ` 号 phoenix中不支持 - String initQuery = StringUtils.remove(queryTemplate, "\\`"); - LOG.debug("openInputFormat initQuery :" + initQuery); - // 将 " 双引号替换成 ' 单引号 - String replaceQuery = StringUtils.replace(initQuery, "\"", "'"); - LOG.info("openInputFormat replaceQuery :" + replaceQuery); - - statement = dbConn.prepareStatement(replaceQuery, resultSetType, resultSetConcurrency); - if (fetchSize == Integer.MIN_VALUE || fetchSize > 0) { - statement.setFetchSize(fetchSize); - } - } catch (SQLException se) { - throw new IllegalArgumentException("open() failed." + se.getMessage(), se); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("JDBC-Class not found. - " + cnfe.getMessage(), cnfe); - } - } - - @Override - public void closeInputFormat() { - // called once per inputFormat (on close) - try { - if (statement != null) { - statement.close(); - } - } catch (SQLException se) { - LOG.info("Inputformat Statement couldn't be closed - " + se.getMessage()); - } finally { - statement = null; - } - - connectionProvider.closeConnection(); - - parameterValues = null; - } - - /** - * Connects to the source database and executes the query in a parallel fashion if this - * {@link InputFormat} is built using a parameterized query (i.e. using a {@link - * PreparedStatement}) and a proper {@link JdbcParameterValuesProvider}, in a non-parallel - * fashion otherwise. 
- * - * @param inputSplit which is ignored if this InputFormat is executed as a non-parallel source, - * a "hook" to the query parameters otherwise (using its splitNumber) - * @throws IOException if there's an error during the execution of the query - */ - @Override - public void open(InputSplit inputSplit) throws IOException { - try { - if (inputSplit != null && parameterValues != null) { - for (int i = 0; i < parameterValues[inputSplit.getSplitNumber()].length; i++) { - Object param = parameterValues[inputSplit.getSplitNumber()][i]; - if (param instanceof String) { - statement.setString(i + 1, (String) param); - } else if (param instanceof Long) { - statement.setLong(i + 1, (Long) param); - } else if (param instanceof Integer) { - statement.setInt(i + 1, (Integer) param); - } else if (param instanceof Double) { - statement.setDouble(i + 1, (Double) param); - } else if (param instanceof Boolean) { - statement.setBoolean(i + 1, (Boolean) param); - } else if (param instanceof Float) { - statement.setFloat(i + 1, (Float) param); - } else if (param instanceof BigDecimal) { - statement.setBigDecimal(i + 1, (BigDecimal) param); - } else if (param instanceof Byte) { - statement.setByte(i + 1, (Byte) param); - } else if (param instanceof Short) { - statement.setShort(i + 1, (Short) param); - } else if (param instanceof Date) { - statement.setDate(i + 1, (Date) param); - } else if (param instanceof Time) { - statement.setTime(i + 1, (Time) param); - } else if (param instanceof Timestamp) { - statement.setTimestamp(i + 1, (Timestamp) param); - } else if (param instanceof Array) { - statement.setArray(i + 1, (Array) param); - } else { - // extends with other types if needed - throw new IllegalArgumentException("open() failed. Parameter " - + i - + " of type " - + param.getClass() - + " is not handled (yet)."); - } - } - if (LOG.isDebugEnabled()) { - LOG.debug(String.format( - "Executing '%s' with parameters %s", - queryTemplate, Arrays.deepToString(parameterValues[inputSplit.getSplitNumber()]))); - } - } - resultSet = statement.executeQuery(); - hasNext = resultSet.next(); - } catch (SQLException se) { - throw new IllegalArgumentException("open() failed." + se.getMessage(), se); - } - } - - /** - * Closes all resources used. - * - * @throws IOException Indicates that a resource could not be closed. - */ - @Override - public void close() throws IOException { - if (resultSet == null) { - return; - } - try { - resultSet.close(); - } catch (SQLException se) { - LOG.info("Inputformat ResultSet couldn't be closed - " + se.getMessage()); - } - } - - /** - * Checks whether all data has been read. - * - * @return boolean value indication whether all data has been read. - * @throws IOException - */ - @Override - public boolean reachedEnd() throws IOException { - return !hasNext; - } - - /** - * Stores the next resultSet row in a tuple. - * - * @param reuse row to be reused. 
- * @return row containing next {@link Row} - * @throws IOException - */ - @Override - public Row nextRecord(Row reuse) throws IOException { - try { - if (!hasNext) { - return null; - } - for (int pos = 0; pos < reuse.getArity(); pos++) { - reuse.setField(pos, resultSet.getObject(pos + 1)); - } - // update hasNext after we've read the record - hasNext = resultSet.next(); - return reuse; - } catch (SQLException se) { - throw new IOException("Couldn't read data - " + se.getMessage(), se); - } catch (NullPointerException npe) { - throw new IOException("Couldn't access resultSet", npe); - } - } - - @Override - public BaseStatistics getStatistics(BaseStatistics cachedStatistics) throws IOException { - return cachedStatistics; - } - - @Override - public InputSplit[] createInputSplits(int minNumSplits) throws IOException { - if (parameterValues == null) { - return new GenericInputSplit[] {new GenericInputSplit(0, 1)}; - } - GenericInputSplit[] ret = new GenericInputSplit[parameterValues.length]; - for (int i = 0; i < ret.length; i++) { - ret[i] = new GenericInputSplit(i, ret.length); - } - return ret; - } - - @Override - public InputSplitAssigner getInputSplitAssigner(InputSplit[] inputSplits) { - return new DefaultInputSplitAssigner(inputSplits); - } - - @VisibleForTesting - protected PreparedStatement getStatement() { - return statement; - } - - @VisibleForTesting - protected Connection getDbConn() { - return connectionProvider.getConnection(); - } - - /** - * A builder used to set parameters to the output format's configuration in a fluent way. - * - * @return builder - */ - public static PhoenixInputFormatBuilder buildJdbcInputFormat() { - return new PhoenixInputFormatBuilder(); - } - - /** Builder for {@link PhoenixInputFormat}. */ - public static class PhoenixInputFormatBuilder { - private final JdbcConnectionOptions.JdbcConnectionOptionsBuilder connOptionsBuilder; - private final PhoenixInputFormat format; - - public PhoenixInputFormatBuilder() { - // this.connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder(); - this.connOptionsBuilder = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder(); - this.format = new PhoenixInputFormat(); - // using TYPE_FORWARD_ONLY for high performance reads - this.format.resultSetType = ResultSet.TYPE_FORWARD_ONLY; - this.format.resultSetConcurrency = ResultSet.CONCUR_READ_ONLY; - } - - public PhoenixInputFormatBuilder setUsername(String username) { - connOptionsBuilder.withUsername(username); - return this; - } - - public PhoenixInputFormatBuilder setPassword(String password) { - connOptionsBuilder.withPassword(password); - return this; - } - - public PhoenixInputFormatBuilder setDrivername(String drivername) { - connOptionsBuilder.withDriverName(drivername); - return this; - } - - public PhoenixInputFormatBuilder setDBUrl(String dbURL) { - connOptionsBuilder.withUrl(dbURL); - return this; - } - - public PhoenixInputFormatBuilder setQuery(String query) { - format.queryTemplate = query; - return this; - } - - public PhoenixInputFormatBuilder setResultSetType(int resultSetType) { - format.resultSetType = resultSetType; - return this; - } - - public PhoenixInputFormatBuilder setResultSetConcurrency(int resultSetConcurrency) { - format.resultSetConcurrency = resultSetConcurrency; - return this; - } - - public PhoenixInputFormatBuilder setParametersProvider(JdbcParameterValuesProvider parameterValuesProvider) { - format.parameterValues = parameterValuesProvider.getParameterValues(); - return this; - } - - public 
PhoenixInputFormatBuilder setRowTypeInfo(RowTypeInfo rowTypeInfo) { - format.rowTypeInfo = rowTypeInfo; - return this; - } - - public PhoenixInputFormatBuilder setFetchSize(int fetchSize) { - Preconditions.checkArgument( - fetchSize == Integer.MIN_VALUE || fetchSize > 0, - "Illegal value %s for fetchSize, has to be positive or Integer.MIN_VALUE.", - fetchSize); - format.fetchSize = fetchSize; - return this; - } - - public PhoenixInputFormatBuilder setAutoCommit(Boolean autoCommit) { - format.autoCommit = autoCommit; - return this; - } - - public PhoenixInputFormatBuilder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) { - format.namespaceMappingEnabled = namespaceMappingEnabled; - return this; - } - - public PhoenixInputFormatBuilder setMapSystemTablesEnabled(Boolean mapSystemTablesEnabled) { - format.mapSystemTablesEnabled = mapSystemTablesEnabled; - return this; - } - - public PhoenixInputFormat finish() { - format.connectionProvider = - // new SimpleJdbcConnectionProvider(connOptionsBuilder.build()); - new PhoneixJdbcConnectionProvider( - connOptionsBuilder.build(), format.namespaceMappingEnabled, format.namespaceMappingEnabled); - if (format.queryTemplate == null) { - throw new NullPointerException("No query supplied"); - } - if (format.rowTypeInfo == null) { - throw new NullPointerException("No " + RowTypeInfo.class.getSimpleName() + " supplied"); - } - if (format.parameterValues == null) { - LOG.debug("No input splitting configured (data will be read with parallelism 1)."); - } - return format; - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/AbstractDialect.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/AbstractDialect.java deleted file mode 100644 index 8f919403dc..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/AbstractDialect.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.dialect; - -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.api.ValidationException; -import org.apache.flink.table.types.DataType; -import org.apache.flink.table.types.logical.DecimalType; -import org.apache.flink.table.types.logical.LogicalTypeRoot; -import org.apache.flink.table.types.logical.TimestampType; -import org.apache.flink.table.types.logical.VarBinaryType; - -import java.util.List; - -abstract class AbstractDialect implements JdbcDialect { - - @Override - public void validate(TableSchema schema) throws ValidationException { - for (int i = 0; i < schema.getFieldCount(); i++) { - DataType dt = schema.getFieldDataType(i).get(); - String fieldName = schema.getFieldName(i).get(); - - // TODO: We can't convert VARBINARY(n) data type to - // PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO in - // LegacyTypeInfoDataTypeConverter - // when n is smaller than Integer.MAX_VALUE - if (unsupportedTypes().contains(dt.getLogicalType().getTypeRoot()) - || (dt.getLogicalType() instanceof VarBinaryType - && Integer.MAX_VALUE != ((VarBinaryType) dt.getLogicalType()).getLength())) { - throw new ValidationException( - String.format("The %s dialect doesn't support type: %s.", dialectName(), dt.toString())); - } - - // only validate precision of DECIMAL type for blink planner - if (dt.getLogicalType() instanceof DecimalType) { - int precision = ((DecimalType) dt.getLogicalType()).getPrecision(); - if (precision > maxDecimalPrecision() || precision < minDecimalPrecision()) { - throw new ValidationException(String.format( - "The precision of field '%s' is out of the DECIMAL " - + "precision range [%d, %d] supported by %s dialect.", - fieldName, minDecimalPrecision(), maxDecimalPrecision(), dialectName())); - } - } - - // only validate precision of DECIMAL type for blink planner - if (dt.getLogicalType() instanceof TimestampType) { - int precision = ((TimestampType) dt.getLogicalType()).getPrecision(); - if (precision > maxTimestampPrecision() || precision < minTimestampPrecision()) { - throw new ValidationException(String.format( - "The precision of field '%s' is out of the TIMESTAMP " - + "precision range [%d, %d] supported by %s dialect.", - fieldName, minTimestampPrecision(), maxTimestampPrecision(), dialectName())); - } - } - } - } - - public abstract int maxDecimalPrecision(); - - public abstract int minDecimalPrecision(); - - public abstract int maxTimestampPrecision(); - - public abstract int minTimestampPrecision(); - - /** - * Defines the unsupported types for the dialect. - * - * @return a list of logical type roots. - */ - public abstract List unsupportedTypes(); -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialect.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialect.java deleted file mode 100644 index ae30d02263..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialect.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.dialect; - -import static java.lang.String.format; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.api.ValidationException; -import org.apache.flink.table.types.logical.RowType; - -import java.io.Serializable; -import java.util.Arrays; -import java.util.Optional; -import java.util.stream.Collectors; - -/** Handle the SQL dialect of jdbc driver. */ -@Internal -public interface JdbcDialect extends Serializable { - - /** - * Get the name of jdbc dialect. - * - * @return the dialect name. - */ - String dialectName(); - - /** - * Check if this dialect instance can handle a certain jdbc url. - * - * @param url the jdbc url. - * @return True if the dialect can be applied on the given jdbc url. - */ - boolean canHandle(String url); - - /** - * Get converter that convert jdbc object and Flink internal object each other. - * - * @param rowType the given row type - * @return a row converter for the database - */ - JdbcRowConverter getRowConverter(RowType rowType); - - /** - * Get limit clause to limit the number of emitted row from the jdbc source. - * - * @param limit number of row to emit. The value of the parameter should be non-negative. - * @return the limit clause. - */ - String getLimitClause(long limit); - - /** - * Check if this dialect instance support a specific data type in table schema. - * - * @param schema the table schema. - * @exception ValidationException in case of the table schema contains unsupported type. - */ - default void validate(TableSchema schema) throws ValidationException {} - - /** - * @return the default driver class name, if user not configure the driver class name, then will - * use this one. - */ - default Optional defaultDriverName() { - return Optional.empty(); - } - - /** - * Quotes the identifier. This is used to put quotes around the identifier in case the column - * name is a reserved keyword, or in case it contains characters that require quotes (e.g. - * space). Default using double quotes {@code "} to quote. - */ - default String quoteIdentifier(String identifier) { - return "\"" + identifier + "\""; - } - - /** - * Get dialect upsert statement, the database has its own upsert syntax, such as Mysql using - * DUPLICATE KEY UPDATE, and PostgresSQL using ON CONFLICT... DO UPDATE SET.. - * - * @return None if dialect does not support upsert statement, the writer will degrade to the use - * of select + update/insert, this performance is poor. - */ - default Optional getUpsertStatement(String tableName, String[] fieldNames, String[] uniqueKeyFields) { - return Optional.empty(); - } - - /** Get row exists statement by condition fields. Default use SELECT. 
*/ - default String getRowExistsStatement(String tableName, String[] conditionFields) { - String fieldExpressions = Arrays.stream(conditionFields) - .map(f -> format("%s = :%s", quoteIdentifier(f), f)) - .collect(Collectors.joining(" AND ")); - return "SELECT 1 FROM " + quoteIdentifier(tableName) + " WHERE " + fieldExpressions; - } - - /** Get insert into statement. */ - default String getInsertIntoStatement(String tableName, String[] fieldNames) { - String columns = Arrays.stream(fieldNames).map(this::quoteIdentifier).collect(Collectors.joining(", ")); - String placeholders = Arrays.stream(fieldNames).map(f -> ":" + f).collect(Collectors.joining(", ")); - return "INSERT INTO " + quoteIdentifier(tableName) + "(" + columns + ")" + " VALUES (" + placeholders + ")"; - } - - /** - * Get update one row statement by condition fields, default not use limit 1, because limit 1 is - * a sql dialect. - */ - default String getUpdateStatement(String tableName, String[] fieldNames, String[] conditionFields) { - String setClause = Arrays.stream(fieldNames) - .map(f -> format("%s = :%s", quoteIdentifier(f), f)) - .collect(Collectors.joining(", ")); - String conditionClause = Arrays.stream(conditionFields) - .map(f -> format("%s = :%s", quoteIdentifier(f), f)) - .collect(Collectors.joining(" AND ")); - return "UPDATE " + quoteIdentifier(tableName) + " SET " + setClause + " WHERE " + conditionClause; - } - - /** - * Get delete one row statement by condition fields, default not use limit 1, because limit 1 is - * a sql dialect. - */ - default String getDeleteStatement(String tableName, String[] conditionFields) { - String conditionClause = Arrays.stream(conditionFields) - .map(f -> format("%s = :%s", quoteIdentifier(f), f)) - .collect(Collectors.joining(" AND ")); - return "DELETE FROM " + quoteIdentifier(tableName) + " WHERE " + conditionClause; - } - - /** Get select fields statement by condition fields. Default use SELECT. */ - default String getSelectFromStatement(String tableName, String[] selectFields, String[] conditionFields) { - String selectExpressions = - Arrays.stream(selectFields).map(this::quoteIdentifier).collect(Collectors.joining(", ")); - String fieldExpressions = Arrays.stream(conditionFields) - .map(f -> format("%s = :%s", quoteIdentifier(f), f)) - .collect(Collectors.joining(" AND ")); - return "SELECT " - + selectExpressions - + " FROM " - + quoteIdentifier(tableName) - + (conditionFields.length > 0 ? " WHERE " + fieldExpressions : ""); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialects.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialects.java deleted file mode 100644 index 6bcc2bcc5b..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/JdbcDialects.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.dialect; - -import java.util.Arrays; -import java.util.List; -import java.util.Optional; - -/** Default JDBC dialects. */ -public final class JdbcDialects { - - private static final List DIALECTS = Arrays.asList(new PhoenixDialect()); - - /** Fetch the JdbcDialect class corresponding to a given database url. */ - public static Optional get(String url) { - for (JdbcDialect dialect : DIALECTS) { - if (dialect.canHandle(url)) { - return Optional.of(dialect); - } - } - return Optional.empty(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/PhoenixDialect.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/PhoenixDialect.java deleted file mode 100644 index d2cbd8794f..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/dialect/PhoenixDialect.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.dialect; - -import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter; -import org.apache.flink.connector.phoenix.internal.converter.PhoenixRowConverter; -import org.apache.flink.table.types.logical.LogicalTypeRoot; -import org.apache.flink.table.types.logical.RowType; - -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - -/** - * PhoenixDialect - * - * @since 2022/3/16 11:19 - */ -public class PhoenixDialect extends AbstractDialect { - private static final long serialVersionUID = 1L; - - private static final int MAX_TIMESTAMP_PRECISION = 6; - private static final int MIN_TIMESTAMP_PRECISION = 1; - - private static final int MAX_DECIMAL_PRECISION = 65; - private static final int MIN_DECIMAL_PRECISION = 1; - - @Override - public boolean canHandle(String url) { - return url.startsWith("jdbc:phoenix:"); - } - - @Override - public JdbcRowConverter getRowConverter(RowType rowType) { - return new PhoenixRowConverter(rowType); - } - - @Override - public String getLimitClause(long limit) { - return "LIMIT " + limit; - } - - @Override - public Optional defaultDriverName() { - return Optional.of("org.apache.phoenix.jdbc.PhoenixDriver"); - } - - /** - * Phoenix does not support backticks; do not wrap column or table names in " or ` quotes, otherwise Phoenix fails to parse the statement. - * - * @param identifier - * @return - */ - @Override - public String quoteIdentifier(String identifier) { - // return "`" + identifier + "`"; - // return super.quoteIdentifier(identifier); - return identifier; - } - - @Override - public Optional getUpsertStatement(String tableName, String[] fieldNames, String[] uniqueKeyFields) { - String columns = - (String) Arrays.stream(fieldNames).map(this::quoteIdentifier).collect(Collectors.joining(", ")); - String placeholders = (String) Arrays.stream(fieldNames) - .map((f) -> { - return ":" + f; - }) - .collect(Collectors.joining(", ")); - String sql = - "UPSERT INTO " + this.quoteIdentifier(tableName) + "(" + columns + ") VALUES (" + placeholders + ")"; - return Optional.of(sql); - } - - @Override - public String getInsertIntoStatement(String tableName, String[] fieldNames) { - return this.getUpsertStatement(tableName, fieldNames, null).get(); - } - - @Override - public String dialectName() { - return "Phoenix"; - } - - @Override - public int maxDecimalPrecision() { - return MAX_DECIMAL_PRECISION; - } - - @Override - public int minDecimalPrecision() { - return MIN_DECIMAL_PRECISION; - } - - @Override - public int maxTimestampPrecision() { - return MAX_TIMESTAMP_PRECISION; - } - - @Override - public int minTimestampPrecision() { - return MIN_TIMESTAMP_PRECISION; - } - - @Override - public List unsupportedTypes() { - - return Arrays.asList( - LogicalTypeRoot.BINARY, - LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE, - LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE, - LogicalTypeRoot.INTERVAL_YEAR_MONTH, - LogicalTypeRoot.INTERVAL_DAY_TIME, - LogicalTypeRoot.ARRAY, - LogicalTypeRoot.MULTISET, - LogicalTypeRoot.MAP, - LogicalTypeRoot.ROW, - LogicalTypeRoot.DISTINCT_TYPE, - LogicalTypeRoot.STRUCTURED_TYPE, - LogicalTypeRoot.NULL, - LogicalTypeRoot.RAW, - LogicalTypeRoot.SYMBOL, - LogicalTypeRoot.UNRESOLVED); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/AbstractJdbcOutputFormat.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/AbstractJdbcOutputFormat.java deleted file mode 100644 index
3e3b85cb45..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/AbstractJdbcOutputFormat.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal; - -import org.apache.flink.api.common.io.RichOutputFormat; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider; -import org.apache.flink.util.Preconditions; - -import java.io.Flushable; -import java.io.IOException; -import java.sql.Connection; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Base jdbc outputFormat. */ -public abstract class AbstractJdbcOutputFormat extends RichOutputFormat implements Flushable { - - private static final long serialVersionUID = 1L; - public static final int DEFAULT_FLUSH_MAX_SIZE = 5000; - public static final long DEFAULT_FLUSH_INTERVAL_MILLS = 0L; - - private static final Logger LOG = LoggerFactory.getLogger(AbstractJdbcOutputFormat.class); - protected final JdbcConnectionProvider connectionProvider; - - public AbstractJdbcOutputFormat(JdbcConnectionProvider connectionProvider) { - this.connectionProvider = Preconditions.checkNotNull(connectionProvider); - } - - @Override - public void configure(Configuration parameters) {} - - @Override - public void open(int taskNumber, int numTasks) throws IOException { - try { - connectionProvider.getOrEstablishConnection(); - } catch (Exception e) { - throw new IOException("unable to open JDBC writer", e); - } - } - - @Override - public void close() { - connectionProvider.closeConnection(); - } - - @Override - public void flush() throws IOException {} - - // @VisibleForTesting - public Connection getConnection() { - return connectionProvider.getConnection(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/GenericJdbcSinkFunction.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/GenericJdbcSinkFunction.java deleted file mode 100644 index 14a2478120..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/GenericJdbcSinkFunction.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.api.common.functions.RuntimeContext; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.runtime.state.FunctionInitializationContext; -import org.apache.flink.runtime.state.FunctionSnapshotContext; -import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; -import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; -import org.apache.flink.util.Preconditions; - -import java.io.IOException; - -import javax.annotation.Nonnull; - -/** A generic SinkFunction for JDBC. */ -@Internal -public class GenericJdbcSinkFunction extends RichSinkFunction implements CheckpointedFunction { - private final AbstractJdbcOutputFormat outputFormat; - - public GenericJdbcSinkFunction(@Nonnull AbstractJdbcOutputFormat outputFormat) { - this.outputFormat = Preconditions.checkNotNull(outputFormat); - } - - @Override - public void open(Configuration parameters) throws Exception { - super.open(parameters); - RuntimeContext ctx = getRuntimeContext(); - outputFormat.setRuntimeContext(ctx); - outputFormat.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks()); - } - - @Override - public void invoke(T value, Context context) throws IOException { - outputFormat.writeRecord(value); - } - - @Override - public void initializeState(FunctionInitializationContext context) {} - - @Override - public void snapshotState(FunctionSnapshotContext context) throws Exception { - outputFormat.flush(); - } - - @Override - public void close() { - outputFormat.close(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/JdbcBatchingOutputFormat.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/JdbcBatchingOutputFormat.java deleted file mode 100644 index 1d2bab6f03..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/JdbcBatchingOutputFormat.java +++ /dev/null @@ -1,377 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.internal; - -import static org.apache.flink.connector.phoenix.utils.JdbcUtils.setRecordToStatement; -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.api.common.functions.RuntimeContext; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.connector.phoenix.JdbcExecutionOptions; -import org.apache.flink.connector.phoenix.JdbcStatementBuilder; -import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor; -import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcOptions; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl; -import org.apache.flink.connector.phoenix.utils.JdbcUtils; -import org.apache.flink.types.Row; -import org.apache.flink.util.Preconditions; -import org.apache.flink.util.concurrent.ExecutorThreadFactory; - -import java.io.IOException; -import java.io.Serializable; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import javax.annotation.Nonnull; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** A JDBC outputFormat that supports batching records before writing records to database. */ -@Internal -public class JdbcBatchingOutputFormat> - extends AbstractJdbcOutputFormat { - - /** - * An interface to extract a value from given argument. - * - * @param The type of given argument - * @param The type of the return value - */ - public interface RecordExtractor extends Function, Serializable { - static RecordExtractor identity() { - return x -> x; - } - } - - /** - * A factory for creating {@link JdbcBatchStatementExecutor} instance. - * - * @param The type of instance. 
- */ - public interface StatementExecutorFactory> - extends Function, Serializable {} - - private static final long serialVersionUID = 1L; - - private static final Logger LOG = LoggerFactory.getLogger(JdbcBatchingOutputFormat.class); - - private final JdbcExecutionOptions executionOptions; - private final StatementExecutorFactory statementExecutorFactory; - private final RecordExtractor jdbcRecordExtractor; - - private transient E jdbcStatementExecutor; - private transient int batchCount = 0; - private transient volatile boolean closed = false; - - private transient ScheduledExecutorService scheduler; - private transient ScheduledFuture scheduledFuture; - private transient volatile Exception flushException; - private Connection conn = null; - - public JdbcBatchingOutputFormat( - @Nonnull JdbcConnectionProvider connectionProvider, - @Nonnull JdbcExecutionOptions executionOptions, - @Nonnull StatementExecutorFactory statementExecutorFactory, - @Nonnull RecordExtractor recordExtractor) { - super(connectionProvider); - this.executionOptions = checkNotNull(executionOptions); - this.statementExecutorFactory = checkNotNull(statementExecutorFactory); - this.jdbcRecordExtractor = checkNotNull(recordExtractor); - } - - /** - * Connects to the target database and initializes the prepared statement. - * - * @param taskNumber The number of the parallel instance. - */ - @Override - public void open(int taskNumber, int numTasks) throws IOException { - // super.open(taskNumber, numTasks); - try { - conn = connectionProvider.getOrEstablishConnection(); - } catch (Exception e) { - throw new IOException("unable to open JDBC writer", e); - } - - jdbcStatementExecutor = createAndOpenStatementExecutor(statementExecutorFactory); - if (executionOptions.getBatchIntervalMs() != 0 && executionOptions.getBatchSize() != 1) { - this.scheduler = - Executors.newScheduledThreadPool(1, new ExecutorThreadFactory("jdbc-upsert-output-format")); - this.scheduledFuture = this.scheduler.scheduleWithFixedDelay( - () -> { - synchronized (JdbcBatchingOutputFormat.this) { - if (!closed) { - // if batch count > 0 to flush - if (batchCount > 0) { - try { - flush(); - } catch (Exception e) { - flushException = e; - } - } - } - } - }, - executionOptions.getBatchIntervalMs(), - executionOptions.getBatchIntervalMs(), - TimeUnit.MILLISECONDS); - } - } - - private E createAndOpenStatementExecutor(StatementExecutorFactory statementExecutorFactory) throws IOException { - E exec = statementExecutorFactory.apply(getRuntimeContext()); - try { - exec.prepareStatements(connectionProvider.getConnection()); - } catch (SQLException e) { - throw new IOException("unable to open JDBC writer", e); - } - return exec; - } - - private void checkFlushException() { - if (flushException != null) { - throw new RuntimeException("Writing records to JDBC failed.", flushException); - } - } - - @Override - public final synchronized void writeRecord(I record) throws IOException { - checkFlushException(); - - try { - addToBatch(record, jdbcRecordExtractor.apply(record)); - batchCount++; - if (executionOptions.getBatchSize() > 0 && batchCount >= executionOptions.getBatchSize()) { - flush(); - } - } catch (Exception e) { - throw new IOException("Writing records to JDBC failed.", e); - } - } - - protected void addToBatch(I original, J extracted) throws SQLException { - jdbcStatementExecutor.addToBatch(extracted); - } - - @Override - public synchronized void flush() throws IOException { - checkFlushException(); - - for (int i = 0; i <= executionOptions.getMaxRetries(); 
i++) { - try { - LOG.debug("pre flush size = {} , retry times = {}", batchCount, i); - attemptFlush(); - // conn.commit(); - batchCount = 0; - break; - } catch (SQLException e) { - LOG.error("JDBC executeBatch error, retry times = {}", i, e); - if (i >= executionOptions.getMaxRetries()) { - throw new IOException(e); - } - try { - if (!connectionProvider.isConnectionValid()) { - updateExecutor(true); - } - } catch (Exception exception) { - LOG.error("JDBC connection is not valid, and reestablish connection failed.", exception); - throw new IOException("Reestablish JDBC connection failed", exception); - } - try { - Thread.sleep(1000 * i); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - throw new IOException("unable to flush; interrupted while doing another attempt", e); - } - } - } - } - - protected void attemptFlush() throws SQLException { - jdbcStatementExecutor.executeBatch(conn); - } - - /** Executes prepared statement and closes all resources of this instance. */ - @Override - public synchronized void close() { - if (!closed) { - closed = true; - - if (this.scheduledFuture != null) { - scheduledFuture.cancel(false); - this.scheduler.shutdown(); - } - - if (batchCount > 0) { - try { - LOG.info("Flushing buffered records before closing the connection !!! batchCount: " + batchCount); - flush(); - } catch (Exception e) { - LOG.warn("Writing records to JDBC failed.", e); - throw new RuntimeException("Writing records to JDBC failed.", e); - } - } - - try { - if (jdbcStatementExecutor != null) { - jdbcStatementExecutor.closeStatements(); - } - } catch (SQLException e) { - LOG.warn("Close JDBC writer failed.", e); - } - } - super.close(); - checkFlushException(); - } - - public static Builder builder() { - return new Builder(); - } - - /** Builder for a {@link JdbcBatchingOutputFormat}. */ - public static class Builder { - private JdbcOptions options; - private String[] fieldNames; - private String[] keyFields; - private int[] fieldTypes; - private JdbcExecutionOptions.Builder executionOptionsBuilder = JdbcExecutionOptions.builder(); - - /** required, jdbc options. */ - public Builder setOptions(JdbcOptions options) { - this.options = options; - return this; - } - - /** required, field names of this jdbc sink. */ - public Builder setFieldNames(String[] fieldNames) { - this.fieldNames = fieldNames; - return this; - } - - /** required, upsert unique keys. */ - public Builder setKeyFields(String[] keyFields) { - this.keyFields = keyFields; - return this; - } - - /** required, field types of this jdbc sink. */ - public Builder setFieldTypes(int[] fieldTypes) { - this.fieldTypes = fieldTypes; - return this; - } - - /** - * optional, flush max size (includes all append, upsert and delete records), over this - * number of records, will flush data. - */ - public Builder setFlushMaxSize(int flushMaxSize) { - executionOptionsBuilder.withBatchSize(flushMaxSize); - return this; - } - - /** optional, flush interval mills, over this time, asynchronous threads will flush data. */ - public Builder setFlushIntervalMills(long flushIntervalMills) { - executionOptionsBuilder.withBatchIntervalMs(flushIntervalMills); - return this; - } - - /** optional, max retry times for jdbc connector. */ - public Builder setMaxRetryTimes(int maxRetryTimes) { - executionOptionsBuilder.withMaxRetries(maxRetryTimes); - return this; - } - - /** - * Finalizes the configuration and checks validity.
- * - * @return Configured JdbcUpsertOutputFormat - */ - public JdbcBatchingOutputFormat, Row, JdbcBatchStatementExecutor> build() { - checkNotNull(options, "No options supplied."); - checkNotNull(fieldNames, "No fieldNames supplied."); - JdbcDmlOptions dml = JdbcDmlOptions.builder() - .withTableName(options.getTableName()) - .withDialect(options.getDialect()) - .withFieldNames(fieldNames) - .withKeyFields(keyFields) - .withFieldTypes(fieldTypes) - .build(); - - if (dml.getKeyFields().isPresent() && dml.getKeyFields().get().length > 0) { - return new TableJdbcUpsertOutputFormat( - new PhoneixJdbcConnectionProvider( - options, - this.options.isNamespaceMappingEnabled(), - this.options.isMapSystemTablesEnabled()), - dml, - executionOptionsBuilder.build()); - } else { - // warn: don't close over builder fields - String sql = FieldNamedPreparedStatementImpl.parseNamedStatement( - options.getDialect().getInsertIntoStatement(dml.getTableName(), dml.getFieldNames()), - new HashMap<>()); - return new JdbcBatchingOutputFormat<>( - new PhoneixJdbcConnectionProvider( - options, - this.options.isNamespaceMappingEnabled(), - this.options.isMapSystemTablesEnabled()), - executionOptionsBuilder.build(), - ctx -> createSimpleRowExecutor( - sql, - dml.getFieldTypes(), - ctx.getExecutionConfig().isObjectReuseEnabled()), - tuple2 -> { - Preconditions.checkArgument(tuple2.f0); - return tuple2.f1; - }); - } - } - } - - static JdbcBatchStatementExecutor createSimpleRowExecutor(String sql, int[] fieldTypes, boolean objectReuse) { - return JdbcBatchStatementExecutor.simple( - sql, createRowJdbcStatementBuilder(fieldTypes), objectReuse ? Row::copy : Function.identity()); - } - - /** - * Creates a {@link JdbcStatementBuilder} for {@link Row} using the provided SQL types array. - * Uses {@link JdbcUtils#setRecordToStatement} - */ - static JdbcStatementBuilder createRowJdbcStatementBuilder(int[] types) { - return (st, record) -> setRecordToStatement(st, types, record); - } - - public void updateExecutor(boolean reconnect) throws SQLException, ClassNotFoundException { - jdbcStatementExecutor.closeStatements(); - jdbcStatementExecutor.prepareStatements( - reconnect ? connectionProvider.reestablishConnection() : connectionProvider.getConnection()); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/TableJdbcUpsertOutputFormat.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/TableJdbcUpsertOutputFormat.java deleted file mode 100644 index 7c5fce3645..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/TableJdbcUpsertOutputFormat.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
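The builder's flush options map directly onto the batching behaviour seen above: records are buffered until either the batch size is reached or a background timer fires, and a failed executeBatch is retried with a linear back-off before the writer gives up. The following plain-JDBC sketch only illustrates that policy under those assumptions — the BatchingWriterSketch class and its two-column statement are not part of this connector:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    /** Size- and interval-based batching with retry, mirroring the flush policy of the deleted format. */
    class BatchingWriterSketch implements AutoCloseable {
        private final Connection conn;
        private final PreparedStatement stmt;
        private final int batchSize;
        private final int maxRetries;
        private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        private int batchCount = 0;

        BatchingWriterSketch(Connection conn, String sql, int batchSize, long intervalMs, int maxRetries)
                throws SQLException {
            this.conn = conn;
            this.stmt = conn.prepareStatement(sql);
            this.batchSize = batchSize;
            this.maxRetries = maxRetries;
            // Time-based flush so small batches are not held back indefinitely.
            scheduler.scheduleWithFixedDelay(() -> {
                synchronized (this) {
                    if (batchCount > 0) {
                        try {
                            flush();
                        } catch (SQLException e) {
                            // The deleted format stores this exception and rethrows it on the next write.
                        }
                    }
                }
            }, intervalMs, intervalMs, TimeUnit.MILLISECONDS);
        }

        synchronized void write(long id, String value) throws SQLException {
            stmt.setLong(1, id);
            stmt.setString(2, value);
            stmt.addBatch();
            if (++batchCount >= batchSize) {
                flush();
            }
        }

        // Linear back-off retry loop, the same shape as the deleted flush(); a production
        // version would re-buffer the failed batch before retrying.
        synchronized void flush() throws SQLException {
            for (int i = 0; i <= maxRetries; i++) {
                try {
                    stmt.executeBatch();
                    conn.commit();
                    batchCount = 0;
                    return;
                } catch (SQLException e) {
                    if (i >= maxRetries) {
                        throw e;
                    }
                    try {
                        Thread.sleep(1000L * i);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        throw e;
                    }
                }
            }
        }

        @Override
        public synchronized void close() throws SQLException {
            scheduler.shutdown();
            if (batchCount > 0) {
                flush();
            }
            stmt.close();
        }
    }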
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal; - -import static org.apache.flink.connector.phoenix.utils.JdbcUtils.getPrimaryKey; -import static org.apache.flink.connector.phoenix.utils.JdbcUtils.setRecordToStatement; -import static org.apache.flink.util.Preconditions.checkArgument; - -import org.apache.flink.annotation.VisibleForTesting; -import org.apache.flink.api.common.functions.RuntimeContext; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.connector.phoenix.JdbcExecutionOptions; -import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.executor.InsertOrUpdateJdbcExecutor; -import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor; -import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl; -import org.apache.flink.types.Row; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.function.Function; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -class TableJdbcUpsertOutputFormat - extends JdbcBatchingOutputFormat, Row, JdbcBatchStatementExecutor> { - private static final Logger LOG = LoggerFactory.getLogger(TableJdbcUpsertOutputFormat.class); - - private JdbcBatchStatementExecutor deleteExecutor; - private final StatementExecutorFactory> deleteStatementExecutorFactory; - private Connection conn = null; - - TableJdbcUpsertOutputFormat( - JdbcConnectionProvider connectionProvider, JdbcDmlOptions dmlOptions, JdbcExecutionOptions batchOptions) { - this( - connectionProvider, - batchOptions, - ctx -> createUpsertRowExecutor(dmlOptions, ctx), - ctx -> createDeleteExecutor(dmlOptions, ctx)); - } - - @VisibleForTesting - TableJdbcUpsertOutputFormat( - JdbcConnectionProvider connectionProvider, - JdbcExecutionOptions batchOptions, - StatementExecutorFactory> statementExecutorFactory, - StatementExecutorFactory> deleteStatementExecutorFactory) { - super(connectionProvider, batchOptions, statementExecutorFactory, tuple2 -> tuple2.f1); - this.deleteStatementExecutorFactory = deleteStatementExecutorFactory; - } - - @Override - public void open(int taskNumber, int numTasks) throws IOException { - super.open(taskNumber, numTasks); - try { - conn = connectionProvider.getOrEstablishConnection(); - } catch (Exception e) { - throw new IOException("unable to open JDBC writer", e); - } - - deleteExecutor = deleteStatementExecutorFactory.apply(getRuntimeContext()); - try { - deleteExecutor.prepareStatements(connectionProvider.getConnection()); - } catch (SQLException e) { - throw new IOException(e); - } - } - - private static JdbcBatchStatementExecutor createDeleteExecutor(JdbcDmlOptions dmlOptions, RuntimeContext ctx) { - int[] pkFields = Arrays.stream(dmlOptions.getFieldNames()) - .mapToInt(Arrays.asList(dmlOptions.getFieldNames())::indexOf) - .toArray(); - int[] pkTypes = dmlOptions.getFieldTypes() == null - ? 
null - : Arrays.stream(pkFields) - .map(f -> dmlOptions.getFieldTypes()[f]) - .toArray(); - String deleteSql = FieldNamedPreparedStatementImpl.parseNamedStatement( - dmlOptions.getDialect().getDeleteStatement(dmlOptions.getTableName(), dmlOptions.getFieldNames()), - new HashMap<>()); - return createKeyedRowExecutor(pkFields, pkTypes, deleteSql); - } - - @Override - protected void addToBatch(Tuple2 original, Row extracted) throws SQLException { - if (original.f0) { - super.addToBatch(original, extracted); - } else { - deleteExecutor.addToBatch(extracted); - } - } - - @Override - public synchronized void close() { - try { - super.close(); - } finally { - try { - if (deleteExecutor != null) { - deleteExecutor.closeStatements(); - } - } catch (SQLException e) { - LOG.warn("unable to close delete statement runner", e); - } - } - } - - @Override - protected void attemptFlush() throws SQLException { - super.attemptFlush(); - deleteExecutor.executeBatch(conn); - } - - @Override - public void updateExecutor(boolean reconnect) throws SQLException, ClassNotFoundException { - super.updateExecutor(reconnect); - deleteExecutor.closeStatements(); - deleteExecutor.prepareStatements(connectionProvider.getConnection()); - } - - private static JdbcBatchStatementExecutor createKeyedRowExecutor(int[] pkFields, int[] pkTypes, String sql) { - return JdbcBatchStatementExecutor.keyed( - sql, - createRowKeyExtractor(pkFields), - (st, record) -> setRecordToStatement( - st, pkTypes, createRowKeyExtractor(pkFields).apply(record))); - } - - private static JdbcBatchStatementExecutor createUpsertRowExecutor(JdbcDmlOptions opt, RuntimeContext ctx) { - checkArgument(opt.getKeyFields().isPresent()); - - int[] pkFields = Arrays.stream(opt.getKeyFields().get()) - .mapToInt(Arrays.asList(opt.getFieldNames())::indexOf) - .toArray(); - int[] pkTypes = opt.getFieldTypes() == null - ? null - : Arrays.stream(pkFields).map(f -> opt.getFieldTypes()[f]).toArray(); - - return opt.getDialect() - .getUpsertStatement( - opt.getTableName(), - opt.getFieldNames(), - opt.getKeyFields().get()) - .map(sql -> createSimpleRowExecutor( - parseNamedStatement(sql), - opt.getFieldTypes(), - ctx.getExecutionConfig().isObjectReuseEnabled())) - .orElseGet(() -> new InsertOrUpdateJdbcExecutor<>( - parseNamedStatement(opt.getDialect() - .getRowExistsStatement( - opt.getTableName(), opt.getKeyFields().get())), - parseNamedStatement( - opt.getDialect().getInsertIntoStatement(opt.getTableName(), opt.getFieldNames())), - parseNamedStatement(opt.getDialect() - .getUpdateStatement( - opt.getTableName(), - opt.getFieldNames(), - opt.getKeyFields().get())), - createRowJdbcStatementBuilder(pkTypes), - createRowJdbcStatementBuilder(opt.getFieldTypes()), - createRowJdbcStatementBuilder(opt.getFieldTypes()), - createRowKeyExtractor(pkFields), - ctx.getExecutionConfig().isObjectReuseEnabled() ? 
Row::copy : Function.identity())); - } - - private static String parseNamedStatement(String statement) { - return FieldNamedPreparedStatementImpl.parseNamedStatement(statement, new HashMap<>()); - } - - private static Function createRowKeyExtractor(int[] pkFields) { - return row -> getPrimaryKey(row, pkFields); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/JdbcConnectionProvider.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/JdbcConnectionProvider.java deleted file mode 100644 index bf159a9a4f..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/JdbcConnectionProvider.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.connection; - -import org.apache.flink.annotation.Internal; - -import java.sql.Connection; -import java.sql.SQLException; - -import javax.annotation.Nullable; - -/** JDBC connection provider. */ -@Internal -public interface JdbcConnectionProvider { - /** - * Get existing connection. - * - * @return existing connection - */ - @Nullable - Connection getConnection(); - - /** - * Check whether possible existing connection is valid or not through {@link - * Connection#isValid(int)}. - * - * @return true if existing connection is valid - * @throws SQLException sql exception throw from {@link Connection#isValid(int)} - */ - boolean isConnectionValid() throws SQLException; - - /** - * Get existing connection or establish an new one if there is none. - * - * @return existing connection or newly established connection - * @throws SQLException sql exception - * @throws ClassNotFoundException driver class not found - */ - Connection getOrEstablishConnection() throws SQLException, ClassNotFoundException; - - /** Close possible existing connection. */ - void closeConnection(); - - /** - * Close possible existing connection and establish an new one. 
- * - * @return newly established connection - * @throws SQLException sql exception - * @throws ClassNotFoundException driver class not found - */ - Connection reestablishConnection() throws SQLException, ClassNotFoundException; -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/PhoneixJdbcConnectionProvider.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/PhoneixJdbcConnectionProvider.java deleted file mode 100644 index f686d82e12..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/connection/PhoneixJdbcConnectionProvider.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.connection; - -import org.apache.flink.connector.phoenix.JdbcConnectionOptions; -import org.apache.flink.util.Preconditions; - -import java.io.Serializable; -import java.sql.Connection; -import java.sql.Driver; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Enumeration; -import java.util.Properties; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * PhoneixJdbcConnectionProvider - * - * @since 2022/3/17 9:04 - */ -public class PhoneixJdbcConnectionProvider implements JdbcConnectionProvider, Serializable { - - private static final Logger LOG = LoggerFactory.getLogger(PhoneixJdbcConnectionProvider.class); - private static final long serialVersionUID = 1L; - private final JdbcConnectionOptions jdbcOptions; - private transient Driver loadedDriver; - private transient Connection connection; - private Boolean namespaceMappingEnabled; - private Boolean mapSystemTablesEnabled; - - public PhoneixJdbcConnectionProvider(JdbcConnectionOptions jdbcOptions) { - this.jdbcOptions = jdbcOptions; - } - - public PhoneixJdbcConnectionProvider( - JdbcConnectionOptions jdbcOptions, boolean namespaceMappingEnabled, boolean mapSystemTablesEnabled) { - this.jdbcOptions = jdbcOptions; - this.namespaceMappingEnabled = namespaceMappingEnabled; - this.mapSystemTablesEnabled = mapSystemTablesEnabled; - } - - public Connection getConnection() { - return this.connection; - } - - public boolean isConnectionValid() throws SQLException { - return this.connection != null && this.connection.isValid(this.jdbcOptions.getConnectionCheckTimeoutSeconds()); - } - - private static Driver loadDriver(String driverName) throws SQLException, ClassNotFoundException { - Preconditions.checkNotNull(driverName); - Enumeration drivers = DriverManager.getDrivers(); - - Driver driver; - do { - if (!drivers.hasMoreElements()) { - Class clazz = - 
Class.forName(driverName, true, Thread.currentThread().getContextClassLoader()); - - try { - return (Driver) clazz.newInstance(); - } catch (Exception var4) { - throw new SQLException("Fail to create driver of class " + driverName, var4); - } - } - - driver = (Driver) drivers.nextElement(); - } while (!driver.getClass().getName().equals(driverName)); - - return driver; - } - - private Driver getLoadedDriver() throws SQLException, ClassNotFoundException { - if (this.loadedDriver == null) { - this.loadedDriver = loadDriver(this.jdbcOptions.getDriverName()); - } - - return this.loadedDriver; - } - - public Connection getOrEstablishConnection() throws SQLException, ClassNotFoundException { - if (this.connection != null) { - return this.connection; - } else { - if (this.jdbcOptions.getDriverName() == null) { - this.connection = DriverManager.getConnection( - this.jdbcOptions.getDbURL(), - (String) this.jdbcOptions.getUsername().orElse((String) null), - (String) this.jdbcOptions.getPassword().orElse((String) null)); - } else { - Driver driver = this.getLoadedDriver(); - Properties info = new Properties(); - this.jdbcOptions.getUsername().ifPresent((user) -> { - info.setProperty("user", user); - }); - this.jdbcOptions.getPassword().ifPresent((password) -> { - info.setProperty("password", password); - }); - - if (this.namespaceMappingEnabled && this.mapSystemTablesEnabled) { - info.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true"); - info.setProperty("phoenix.schema.mapSystemTablesToNamespace", "true"); - } - - this.connection = driver.connect(this.jdbcOptions.getDbURL(), info); - - this.connection.setAutoCommit(false); - if (this.connection == null) { - throw new SQLException("No suitable driver found for " + this.jdbcOptions.getDbURL(), "08001"); - } - } - - return this.connection; - } - } - - public void closeConnection() { - if (this.connection != null) { - try { - this.connection.close(); - } catch (SQLException var5) { - LOG.warn("JDBC connection close failed.", var5); - } finally { - this.connection = null; - } - } - } - - public Connection reestablishConnection() throws SQLException, ClassNotFoundException { - this.closeConnection(); - return this.getOrEstablishConnection(); - } - - static { - DriverManager.getDrivers(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/AbstractJdbcRowConverter.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/AbstractJdbcRowConverter.java deleted file mode 100644 index 642d3ae120..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/AbstractJdbcRowConverter.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
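The connection provider above passes two Phoenix-specific properties through to the driver when namespace mapping is enabled and turns auto-commit off so that batches are committed explicitly. A minimal sketch of establishing such a connection with plain JDBC — the ZooKeeper quorum "zk-host:2181" is a placeholder, and the Phoenix driver is assumed to be on the classpath:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.util.Properties;

    public class PhoenixConnectionSketch {
        public static void main(String[] args) throws SQLException {
            Properties info = new Properties();
            // Same property keys the deleted provider sets when namespace mapping is enabled.
            info.setProperty("phoenix.schema.isNamespaceMappingEnabled", "true");
            info.setProperty("phoenix.schema.mapSystemTablesToNamespace", "true");

            // "zk-host:2181" is a placeholder ZooKeeper quorum, not a value from this patch.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk-host:2181", info)) {
                // The deleted provider disables auto-commit so batches are committed explicitly.
                conn.setAutoCommit(false);
                System.out.println("connected: " + !conn.isClosed());
            }
        }
    }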
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.converter; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement; -import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil; -import org.apache.flink.table.data.DecimalData; -import org.apache.flink.table.data.GenericRowData; -import org.apache.flink.table.data.RowData; -import org.apache.flink.table.data.StringData; -import org.apache.flink.table.data.TimestampData; -import org.apache.flink.table.types.logical.DecimalType; -import org.apache.flink.table.types.logical.LogicalType; -import org.apache.flink.table.types.logical.LogicalTypeRoot; -import org.apache.flink.table.types.logical.RowType; -import org.apache.flink.table.types.logical.TimestampType; -import org.apache.flink.table.types.utils.TypeConversions; - -import java.io.Serializable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; - -/** Base class for all converters that convert between JDBC object and Flink internal object. */ -public abstract class AbstractJdbcRowConverter implements JdbcRowConverter { - - protected final RowType rowType; - protected final JdbcDeserializationConverter[] toInternalConverters; - protected final JdbcSerializationConverter[] toExternalConverters; - protected final LogicalType[] fieldTypes; - - public abstract String converterName(); - - public AbstractJdbcRowConverter(RowType rowType) { - this.rowType = checkNotNull(rowType); - this.fieldTypes = - rowType.getFields().stream().map(RowType.RowField::getType).toArray(LogicalType[]::new); - this.toInternalConverters = new JdbcDeserializationConverter[rowType.getFieldCount()]; - this.toExternalConverters = new JdbcSerializationConverter[rowType.getFieldCount()]; - for (int i = 0; i < rowType.getFieldCount(); i++) { - toInternalConverters[i] = createNullableInternalConverter(rowType.getTypeAt(i)); - toExternalConverters[i] = createNullableExternalConverter(fieldTypes[i]); - } - } - - @Override - public RowData toInternal(ResultSet resultSet) throws SQLException { - GenericRowData genericRowData = new GenericRowData(rowType.getFieldCount()); - for (int pos = 0; pos < rowType.getFieldCount(); pos++) { - Object field = resultSet.getObject(pos + 1); - genericRowData.setField(pos, toInternalConverters[pos].deserialize(field)); - } - return genericRowData; - } - - @Override - public FieldNamedPreparedStatement toExternal(RowData rowData, FieldNamedPreparedStatement statement) - throws SQLException { - for (int index = 0; index < rowData.getArity(); index++) { - toExternalConverters[index].serialize(rowData, index, statement); - } - return statement; - } - - /** Runtime converter to convert JDBC field to {@link RowData} type object. */ - @FunctionalInterface - interface JdbcDeserializationConverter extends Serializable { - /** - * Convert a jdbc field object of {@link ResultSet} to the internal data structure object. 
- * - * @param jdbcField - */ - Object deserialize(Object jdbcField) throws SQLException; - } - - /** - * Runtime converter to convert {@link RowData} field to java object and fill into the {@link - * PreparedStatement}. - */ - @FunctionalInterface - interface JdbcSerializationConverter extends Serializable { - void serialize(RowData rowData, int index, FieldNamedPreparedStatement statement) throws SQLException; - } - - /** - * Create a nullable runtime {@link JdbcDeserializationConverter} from given {@link - * LogicalType}. - */ - protected JdbcDeserializationConverter createNullableInternalConverter(LogicalType type) { - return wrapIntoNullableInternalConverter(createInternalConverter(type)); - } - - protected JdbcDeserializationConverter wrapIntoNullableInternalConverter( - JdbcDeserializationConverter jdbcDeserializationConverter) { - return val -> { - if (val == null) { - return null; - } else { - return jdbcDeserializationConverter.deserialize(val); - } - }; - } - - protected JdbcDeserializationConverter createInternalConverter(LogicalType type) { - switch (type.getTypeRoot()) { - case NULL: - return val -> null; - case BOOLEAN: - case FLOAT: - case DOUBLE: - case INTERVAL_YEAR_MONTH: - case INTERVAL_DAY_TIME: - return val -> val; - case TINYINT: - return val -> ((Integer) val).byteValue(); - case SMALLINT: - // Converter for small type that casts value to int and then return short value, - // since - // JDBC 1.0 use int type for small values. - return val -> val instanceof Integer ? ((Integer) val).shortValue() : val; - case INTEGER: - return val -> val; - case BIGINT: - return val -> val; - case DECIMAL: - final int precision = ((DecimalType) type).getPrecision(); - final int scale = ((DecimalType) type).getScale(); - // using decimal(20, 0) to support db type bigint unsigned, user should define - // decimal(20, 0) in SQL, - // but other precision like decimal(30, 0) can work too from lenient consideration. - return val -> val instanceof BigInteger - ? DecimalData.fromBigDecimal(new BigDecimal((BigInteger) val, 0), precision, scale) - : DecimalData.fromBigDecimal((BigDecimal) val, precision, scale); - case DATE: - return val -> (int) (((Date) val).toLocalDate().toEpochDay()); - case TIME_WITHOUT_TIME_ZONE: - return val -> (int) (((Time) val).toLocalTime().toNanoOfDay() / 1_000_000L); - case TIMESTAMP_WITH_TIME_ZONE: - case TIMESTAMP_WITHOUT_TIME_ZONE: - return val -> val instanceof LocalDateTime - ? TimestampData.fromLocalDateTime((LocalDateTime) val) - : TimestampData.fromTimestamp((Timestamp) val); - case CHAR: - case VARCHAR: - return val -> StringData.fromString((String) val); - case BINARY: - case VARBINARY: - return val -> (byte[]) val; - case ARRAY: - case ROW: - case MAP: - case MULTISET: - case RAW: - default: - throw new UnsupportedOperationException("Unsupported type:" + type); - } - } - - /** Create a nullable JDBC f{@link JdbcSerializationConverter} from given sql type. 
*/ - protected JdbcSerializationConverter createNullableExternalConverter(LogicalType type) { - return wrapIntoNullableExternalConverter(createExternalConverter(type), type); - } - - protected JdbcSerializationConverter wrapIntoNullableExternalConverter( - JdbcSerializationConverter jdbcSerializationConverter, LogicalType type) { - final int sqlType = JdbcTypeUtil.typeInformationToSqlType( - TypeConversions.fromDataTypeToLegacyInfo(TypeConversions.fromLogicalToDataType(type))); - return (val, index, statement) -> { - if (val == null || val.isNullAt(index) || LogicalTypeRoot.NULL.equals(type.getTypeRoot())) { - statement.setNull(index, sqlType); - } else { - jdbcSerializationConverter.serialize(val, index, statement); - } - }; - } - - protected JdbcSerializationConverter createExternalConverter(LogicalType type) { - switch (type.getTypeRoot()) { - case BOOLEAN: - return (val, index, statement) -> statement.setBoolean(index, val.getBoolean(index)); - case TINYINT: - return (val, index, statement) -> statement.setByte(index, val.getByte(index)); - case SMALLINT: - return (val, index, statement) -> statement.setShort(index, val.getShort(index)); - case INTEGER: - case INTERVAL_YEAR_MONTH: - return (val, index, statement) -> statement.setInt(index, val.getInt(index)); - case BIGINT: - case INTERVAL_DAY_TIME: - return (val, index, statement) -> statement.setLong(index, val.getLong(index)); - case FLOAT: - return (val, index, statement) -> statement.setFloat(index, val.getFloat(index)); - case DOUBLE: - return (val, index, statement) -> statement.setDouble(index, val.getDouble(index)); - case CHAR: - case VARCHAR: - // value is BinaryString - return (val, index, statement) -> - statement.setString(index, val.getString(index).toString()); - case BINARY: - case VARBINARY: - return (val, index, statement) -> statement.setBytes(index, val.getBinary(index)); - case DATE: - return (val, index, statement) -> - statement.setDate(index, Date.valueOf(LocalDate.ofEpochDay(val.getInt(index)))); - case TIME_WITHOUT_TIME_ZONE: - return (val, index, statement) -> - statement.setTime(index, Time.valueOf(LocalTime.ofNanoOfDay(val.getInt(index) * 1_000_000L))); - case TIMESTAMP_WITH_TIME_ZONE: - case TIMESTAMP_WITHOUT_TIME_ZONE: - final int timestampPrecision = ((TimestampType) type).getPrecision(); - return (val, index, statement) -> statement.setTimestamp( - index, val.getTimestamp(index, timestampPrecision).toTimestamp()); - case DECIMAL: - final int decimalPrecision = ((DecimalType) type).getPrecision(); - final int decimalScale = ((DecimalType) type).getScale(); - return (val, index, statement) -> statement.setBigDecimal( - index, - val.getDecimal(index, decimalPrecision, decimalScale).toBigDecimal()); - case ARRAY: - case MAP: - case MULTISET: - case ROW: - case RAW: - default: - throw new UnsupportedOperationException("Unsupported type:" + type); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/JdbcRowConverter.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/JdbcRowConverter.java deleted file mode 100644 index 8a22ae9bbb..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/JdbcRowConverter.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
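The converter above maps temporal JDBC values onto Flink's compact internal representation: DATE becomes the number of days since the epoch and TIME becomes milliseconds of the day, with the inverse applied when writing back to a statement. A small sketch of just those conversions in plain Java (the sample literal values are arbitrary):

    import java.sql.Date;
    import java.sql.Time;
    import java.time.LocalDate;
    import java.time.LocalTime;

    public class TemporalConversionSketch {
        public static void main(String[] args) {
            // Internal representation used by the converter: DATE as days since the epoch (int).
            Date jdbcDate = Date.valueOf("2022-03-16");
            int epochDay = (int) jdbcDate.toLocalDate().toEpochDay();

            // TIME as milliseconds of the day (int).
            Time jdbcTime = Time.valueOf("11:19:00");
            int millisOfDay = (int) (jdbcTime.toLocalTime().toNanoOfDay() / 1_000_000L);

            // And back again when filling a prepared statement.
            Date roundTrippedDate = Date.valueOf(LocalDate.ofEpochDay(epochDay));
            Time roundTrippedTime = Time.valueOf(LocalTime.ofNanoOfDay(millisOfDay * 1_000_000L));

            System.out.println(epochDay + " -> " + roundTrippedDate);
            System.out.println(millisOfDay + " -> " + roundTrippedTime);
        }
    }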
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.converter; - -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement; -import org.apache.flink.table.data.RowData; - -import java.io.Serializable; -import java.sql.ResultSet; -import java.sql.SQLException; - -/** - * Converter that is responsible to convert between JDBC object and Flink SQL internal data - * structure {@link RowData}. - */ -public interface JdbcRowConverter extends Serializable { - - /** - * Convert data retrieved from {@link ResultSet} to internal {@link RowData}. - * - * @param resultSet ResultSet from JDBC - */ - RowData toInternal(ResultSet resultSet) throws SQLException; - - /** - * Convert data retrieved from Flink internal RowData to JDBC Object. - * - * @param rowData The given internal {@link RowData}. - * @param statement The statement to be filled. - * @return The filled statement. - */ - FieldNamedPreparedStatement toExternal(RowData rowData, FieldNamedPreparedStatement statement) throws SQLException; -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/PhoenixRowConverter.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/PhoenixRowConverter.java deleted file mode 100644 index debff6bc85..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/converter/PhoenixRowConverter.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.internal.converter; - -import org.apache.flink.table.types.logical.RowType; - -/** - * PhoenixRowConverter - * - * @since 2022/3/16 11:21 - */ -public class PhoenixRowConverter extends AbstractJdbcRowConverter { - - private static final long serialVersionUID = 1L; - - @Override - public String converterName() { - return "Phoenix"; - } - - public PhoenixRowConverter(RowType rowType) { - super(rowType); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/InsertOrUpdateJdbcExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/InsertOrUpdateJdbcExecutor.java deleted file mode 100644 index e58f9e91c0..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/InsertOrUpdateJdbcExecutor.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.connector.phoenix.JdbcStatementBuilder; -import org.apache.flink.connector.phoenix.table.PhoenixUpsertTableSink; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - -import javax.annotation.Nonnull; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * {@link JdbcBatchStatementExecutor} that provides upsert semantics by updating row if it exists - * and inserting otherwise. Used in Table API. - * - * @deprecated This has been replaced with {@link TableInsertOrUpdateStatementExecutor}, will remove - * this once {@link PhoenixUpsertTableSink} is removed. 
- */ -@Internal -public final class InsertOrUpdateJdbcExecutor implements JdbcBatchStatementExecutor { - - private static final Logger LOG = LoggerFactory.getLogger(InsertOrUpdateJdbcExecutor.class); - - private final String existSQL; - private final String insertSQL; - private final String updateSQL; - - private final JdbcStatementBuilder existSetter; - private final JdbcStatementBuilder insertSetter; - private final JdbcStatementBuilder updateSetter; - - private final Function keyExtractor; - private final Function valueMapper; - - private final Map batch; - - private transient PreparedStatement existStatement; - private transient PreparedStatement insertStatement; - private transient PreparedStatement updateStatement; - - public InsertOrUpdateJdbcExecutor( - @Nonnull String existSQL, - @Nonnull String insertSQL, - @Nonnull String updateSQL, - @Nonnull JdbcStatementBuilder existSetter, - @Nonnull JdbcStatementBuilder insertSetter, - @Nonnull JdbcStatementBuilder updateSetter, - @Nonnull Function keyExtractor, - @Nonnull Function valueExtractor) { - this.existSQL = checkNotNull(existSQL); - this.updateSQL = checkNotNull(updateSQL); - this.existSetter = checkNotNull(existSetter); - this.insertSQL = checkNotNull(insertSQL); - this.insertSetter = checkNotNull(insertSetter); - this.updateSetter = checkNotNull(updateSetter); - this.keyExtractor = checkNotNull(keyExtractor); - this.valueMapper = checkNotNull(valueExtractor); - this.batch = new HashMap<>(); - } - - @Override - public void prepareStatements(Connection connection) throws SQLException { - existStatement = connection.prepareStatement(existSQL); - insertStatement = connection.prepareStatement(insertSQL); - updateStatement = connection.prepareStatement(updateSQL); - } - - @Override - public void addToBatch(R record) { - batch.put(keyExtractor.apply(record), valueMapper.apply(record)); - } - - @Override - public void executeBatch(Connection conn) throws SQLException { - if (!batch.isEmpty()) { - for (Map.Entry entry : batch.entrySet()) { - processOneRowInBatch(entry.getKey(), entry.getValue()); - } - conn.commit(); - batch.clear(); - } - } - - private void processOneRowInBatch(K pk, V row) throws SQLException { - if (exist(pk)) { - updateSetter.accept(updateStatement, row); - updateStatement.executeUpdate(); - } else { - insertSetter.accept(insertStatement, row); - insertStatement.executeUpdate(); - } - } - - private boolean exist(K pk) throws SQLException { - existSetter.accept(existStatement, pk); - try (ResultSet resultSet = existStatement.executeQuery()) { - return resultSet.next(); - } - } - - @Override - public void closeStatements() throws SQLException { - for (PreparedStatement s : Arrays.asList(existStatement, insertStatement, updateStatement)) { - if (s != null) { - s.close(); - } - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/JdbcBatchStatementExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/JdbcBatchStatementExecutor.java deleted file mode 100644 index 5fdad4d1b1..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/JdbcBatchStatementExecutor.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.connector.phoenix.JdbcStatementBuilder; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.function.Function; - -/** Executes the given JDBC statement in batch for the accumulated records. */ -@Internal -public interface JdbcBatchStatementExecutor<T> { - - /** Create statements from connection. */ - void prepareStatements(Connection connection) throws SQLException; - - void addToBatch(T record) throws SQLException; - - /** - * Submits a batch of commands to the database for execution. - * - * @param conn - */ - void executeBatch(Connection conn) throws SQLException; - - /** Close JDBC related statements. */ - void closeStatements() throws SQLException; - - static <T, K> JdbcBatchStatementExecutor<T> keyed( - String sql, Function<T, K> keyExtractor, JdbcStatementBuilder<K> statementBuilder) { - return new KeyedBatchStatementExecutor<>(sql, keyExtractor, statementBuilder); - } - - static <T, V> JdbcBatchStatementExecutor<T> simple( - String sql, JdbcStatementBuilder<V> paramSetter, Function<T, V> valueTransformer) { - return new SimpleBatchStatementExecutor<>(sql, paramSetter, valueTransformer); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/KeyedBatchStatementExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/KeyedBatchStatementExecutor.java deleted file mode 100644 index 050d5e4429..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/KeyedBatchStatementExecutor.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import org.apache.flink.connector.phoenix.JdbcStatementBuilder; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.HashSet; -import java.util.Set; -import java.util.function.Function; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A {@link JdbcBatchStatementExecutor} that extracts SQL keys from the supplied stream elements and - * executes a SQL query for them. - */ -class KeyedBatchStatementExecutor implements JdbcBatchStatementExecutor { - - private static final Logger LOG = LoggerFactory.getLogger(KeyedBatchStatementExecutor.class); - - private final String sql; - private final JdbcStatementBuilder parameterSetter; - private final Function keyExtractor; - private final Set batch; - - private transient PreparedStatement st; - - /** - * Keep in mind object reuse: if it's on then key extractor may be required to return new - * object. - */ - KeyedBatchStatementExecutor(String sql, Function keyExtractor, JdbcStatementBuilder statementBuilder) { - this.parameterSetter = statementBuilder; - this.keyExtractor = keyExtractor; - this.sql = sql; - this.batch = new HashSet<>(); - } - - @Override - public void prepareStatements(Connection connection) throws SQLException { - st = connection.prepareStatement(sql); - } - - @Override - public void addToBatch(T record) { - batch.add(keyExtractor.apply(record)); - } - - @Override - public void executeBatch(Connection conn) throws SQLException { - if (!batch.isEmpty()) { - for (K entry : batch) { - parameterSetter.accept(st, entry); - st.executeUpdate(); - } - LOG.info("connection commit datasize:" + batch.size()); - conn.commit(); - batch.clear(); - } - } - - @Override - public void closeStatements() throws SQLException { - if (st != null) { - st.close(); - st = null; - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/SimpleBatchStatementExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/SimpleBatchStatementExecutor.java deleted file mode 100644 index bc56c1e02c..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/SimpleBatchStatementExecutor.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import org.apache.flink.connector.phoenix.JdbcStatementBuilder; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Function; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A {@link JdbcBatchStatementExecutor} that executes supplied statement for given the records - * (without any pre-processing). - */ -class SimpleBatchStatementExecutor implements JdbcBatchStatementExecutor { - - private static final Logger LOG = LoggerFactory.getLogger(SimpleBatchStatementExecutor.class); - - private final String sql; - private final JdbcStatementBuilder parameterSetter; - private final Function valueTransformer; - private final List batch; - - private transient PreparedStatement st; - - SimpleBatchStatementExecutor( - String sql, JdbcStatementBuilder statementBuilder, Function valueTransformer) { - this.sql = sql; - this.parameterSetter = statementBuilder; - this.valueTransformer = valueTransformer; - this.batch = new ArrayList<>(); - } - - @Override - public void prepareStatements(Connection connection) throws SQLException { - this.st = connection.prepareStatement(sql); - } - - @Override - public void addToBatch(T record) { - batch.add(valueTransformer.apply(record)); - } - - @Override - public void executeBatch(Connection connection) throws SQLException { - if (!batch.isEmpty()) { - for (V r : batch) { - parameterSetter.accept(st, r); - st.executeUpdate(); - } - LOG.info("connection commit dataSize:" + batch.size()); - connection.commit(); - batch.clear(); - } - } - - @Override - public void closeStatements() throws SQLException { - if (st != null) { - st.close(); - st = null; - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferReducedStatementExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferReducedStatementExecutor.java deleted file mode 100644 index 49e2fb5782..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferReducedStatementExecutor.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * -  */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.table.data.RowData; -import org.apache.flink.types.RowKind; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - -/** - * Currently, this statement executor is only used for table/sql to buffer insert/update/delete - * events, and reduce them in buffer before submit to external database. - */ -public final class TableBufferReducedStatementExecutor implements JdbcBatchStatementExecutor<RowData> { - - private final JdbcBatchStatementExecutor<RowData> upsertExecutor; - private final JdbcBatchStatementExecutor<RowData> deleteExecutor; - private final Function<RowData, RowData> keyExtractor; - private final Function<RowData, RowData> valueTransform; - // the mapping is [KEY, <+/-, VALUE>] - private final Map<RowData, Tuple2<Boolean, RowData>> reduceBuffer = new HashMap<>(); - - public TableBufferReducedStatementExecutor( - JdbcBatchStatementExecutor<RowData> upsertExecutor, - JdbcBatchStatementExecutor<RowData> deleteExecutor, - Function<RowData, RowData> keyExtractor, - Function<RowData, RowData> valueTransform) { - this.upsertExecutor = upsertExecutor; - this.deleteExecutor = deleteExecutor; - this.keyExtractor = keyExtractor; - this.valueTransform = valueTransform; - } - - @Override - public void prepareStatements(Connection connection) throws SQLException { - upsertExecutor.prepareStatements(connection); - deleteExecutor.prepareStatements(connection); - } - - @Override - public void addToBatch(RowData record) throws SQLException { - RowData key = keyExtractor.apply(record); - boolean flag = changeFlag(record.getRowKind()); - RowData value = valueTransform.apply(record); // copy or not - reduceBuffer.put(key, Tuple2.of(flag, value)); - } - - /** - * Returns true if the row kind is INSERT or UPDATE_AFTER, returns false if the row kind is - * DELETE or UPDATE_BEFORE. - */ - private boolean changeFlag(RowKind rowKind) { - switch (rowKind) { - case INSERT: - case UPDATE_AFTER: - return true; - case DELETE: - case UPDATE_BEFORE: - return false; - default: - throw new UnsupportedOperationException(String.format( - "Unknown row kind, the supported row kinds is: INSERT," - + " UPDATE_BEFORE, UPDATE_AFTER, DELETE, but get: %s.", - rowKind)); - } - } - - @Override - public void executeBatch(Connection conn) throws SQLException { - for (Map.Entry<RowData, Tuple2<Boolean, RowData>> entry : reduceBuffer.entrySet()) { - if (entry.getValue().f0) { - upsertExecutor.addToBatch(entry.getValue().f1); - } else { - // delete by key - deleteExecutor.addToBatch(entry.getKey()); - } - } - upsertExecutor.executeBatch(conn); - deleteExecutor.executeBatch(conn); - reduceBuffer.clear(); - } - - @Override - public void closeStatements() throws SQLException { - upsertExecutor.closeStatements(); - deleteExecutor.closeStatements(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferedStatementExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferedStatementExecutor.java deleted file mode 100644 index 0614c4eae1..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableBufferedStatementExecutor.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements.
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import org.apache.flink.table.data.RowData; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Function; - -/** - * Currently, this statement executor is only used for table/sql to buffer records, because the - * {@link PreparedStatement#executeBatch()} may fail and clear buffered records, so we have to - * buffer the records and replay the records when retrying {@link - * JdbcBatchStatementExecutor#executeBatch(Connection)}. - */ -public final class TableBufferedStatementExecutor implements JdbcBatchStatementExecutor { - - private final JdbcBatchStatementExecutor statementExecutor; - private final Function valueTransform; - private final List buffer = new ArrayList<>(); - - public TableBufferedStatementExecutor( - JdbcBatchStatementExecutor statementExecutor, Function valueTransform) { - this.statementExecutor = statementExecutor; - this.valueTransform = valueTransform; - } - - @Override - public void prepareStatements(Connection connection) throws SQLException { - statementExecutor.prepareStatements(connection); - } - - @Override - public void addToBatch(RowData record) throws SQLException { - RowData value = valueTransform.apply(record); // copy or not - buffer.add(value); - } - - @Override - public void executeBatch(Connection conn) throws SQLException { - for (RowData value : buffer) { - statementExecutor.addToBatch(value); - } - statementExecutor.executeBatch(conn); - buffer.clear(); - } - - @Override - public void closeStatements() throws SQLException { - statementExecutor.closeStatements(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableInsertOrUpdateStatementExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableInsertOrUpdateStatementExecutor.java deleted file mode 100644 index d9e66d6fd7..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableInsertOrUpdateStatementExecutor.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement; -import org.apache.flink.connector.phoenix.statement.StatementFactory; -import org.apache.flink.table.data.RowData; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.function.Function; - -/** - * {@link JdbcBatchStatementExecutor} that provides upsert semantics by updating row if it exists - * and inserting otherwise. Only used in Table/SQL API. - */ -@Internal -public final class TableInsertOrUpdateStatementExecutor implements JdbcBatchStatementExecutor { - - private final StatementFactory existStmtFactory; - private final StatementFactory insertStmtFactory; - private final StatementFactory updateStmtFactory; - - private final JdbcRowConverter existSetter; - private final JdbcRowConverter insertSetter; - private final JdbcRowConverter updateSetter; - - private final Function keyExtractor; - - private transient FieldNamedPreparedStatement existStatement; - private transient FieldNamedPreparedStatement insertStatement; - private transient FieldNamedPreparedStatement updateStatement; - - public TableInsertOrUpdateStatementExecutor( - StatementFactory existStmtFactory, - StatementFactory insertStmtFactory, - StatementFactory updateStmtFactory, - JdbcRowConverter existSetter, - JdbcRowConverter insertSetter, - JdbcRowConverter updateSetter, - Function keyExtractor) { - this.existStmtFactory = checkNotNull(existStmtFactory); - this.insertStmtFactory = checkNotNull(insertStmtFactory); - this.updateStmtFactory = checkNotNull(updateStmtFactory); - this.existSetter = checkNotNull(existSetter); - this.insertSetter = checkNotNull(insertSetter); - this.updateSetter = checkNotNull(updateSetter); - this.keyExtractor = keyExtractor; - } - - @Override - public void prepareStatements(Connection connection) throws SQLException { - existStatement = existStmtFactory.createStatement(connection); - insertStatement = insertStmtFactory.createStatement(connection); - updateStatement = updateStmtFactory.createStatement(connection); - } - - @Override - public void addToBatch(RowData record) throws SQLException { - processOneRowInBatch(keyExtractor.apply(record), record); - } - - private void processOneRowInBatch(RowData pk, RowData row) throws SQLException { - if (exist(pk)) { - updateSetter.toExternal(row, updateStatement); - updateStatement.addBatch(); - } else { - insertSetter.toExternal(row, insertStatement); - insertStatement.addBatch(); - } - } - - private boolean exist(RowData pk) throws SQLException { - existSetter.toExternal(pk, existStatement); - try (ResultSet resultSet = existStatement.executeQuery()) { - return resultSet.next(); - } - } - - @Override - public void executeBatch(Connection conn) throws SQLException { - conn.commit(); - } - - @Override - public void 
closeStatements() throws SQLException { - for (FieldNamedPreparedStatement s : Arrays.asList(existStatement, insertStatement, updateStatement)) { - if (s != null) { - s.close(); - } - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableSimpleStatementExecutor.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableSimpleStatementExecutor.java deleted file mode 100644 index 67a20771a8..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/executor/TableSimpleStatementExecutor.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.executor; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement; -import org.apache.flink.connector.phoenix.statement.StatementFactory; -import org.apache.flink.table.data.RowData; - -import java.sql.Connection; -import java.sql.SQLException; - -/** - * A {@link JdbcBatchStatementExecutor} that simply adds the records into batches of {@link - * java.sql.PreparedStatement} and doesn't buffer records in memory. Only used in Table/SQL API. - */ -public final class TableSimpleStatementExecutor implements JdbcBatchStatementExecutor { - - private final StatementFactory stmtFactory; - private final JdbcRowConverter converter; - - private transient FieldNamedPreparedStatement st; - - /** - * Keep in mind object reuse: if it's on then key extractor may be required to return new - * object. 
- */ - public TableSimpleStatementExecutor(StatementFactory stmtFactory, JdbcRowConverter converter) { - this.stmtFactory = checkNotNull(stmtFactory); - this.converter = checkNotNull(converter); - } - - @Override - public void prepareStatements(Connection connection) throws SQLException { - st = stmtFactory.createStatement(connection); - } - - @Override - public void addToBatch(RowData record) throws SQLException { - converter.toExternal(record, st); - st.addBatch(); - } - - @Override - public void executeBatch(Connection conn) throws SQLException { - // st.executeBatch(); - conn.commit(); - } - - @Override - public void closeStatements() throws SQLException { - if (st != null) { - st.close(); - st = null; - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcDmlOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcDmlOptions.java deleted file mode 100644 index 970efd6eca..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcDmlOptions.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.util.Preconditions; - -import java.util.Arrays; -import java.util.Objects; -import java.util.Optional; -import java.util.stream.Stream; - -import javax.annotation.Nullable; - -/** JDBC sink DML options. 
*/ -public class JdbcDmlOptions extends JdbcTypedQueryOptions { - - private static final long serialVersionUID = 1L; - - private final String[] fieldNames; - - @Nullable - private final String[] keyFields; - - private final String tableName; - private final JdbcDialect dialect; - - public static JdbcDmlOptionsBuilder builder() { - return new JdbcDmlOptionsBuilder(); - } - - private JdbcDmlOptions( - String tableName, JdbcDialect dialect, String[] fieldNames, int[] fieldTypes, String[] keyFields) { - super(fieldTypes); - this.tableName = Preconditions.checkNotNull(tableName, "table is empty"); - this.dialect = Preconditions.checkNotNull(dialect, "dialect name is empty"); - this.fieldNames = Preconditions.checkNotNull(fieldNames, "field names is empty"); - this.keyFields = keyFields; - } - - public String getTableName() { - return tableName; - } - - public JdbcDialect getDialect() { - return dialect; - } - - public String[] getFieldNames() { - return fieldNames; - } - - public Optional getKeyFields() { - return Optional.ofNullable(keyFields); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - JdbcDmlOptions that = (JdbcDmlOptions) o; - return Arrays.equals(fieldNames, that.fieldNames) - && Arrays.equals(keyFields, that.keyFields) - && Objects.equals(tableName, that.tableName) - && Objects.equals(dialect, that.dialect); - } - - @Override - public int hashCode() { - int result = Objects.hash(tableName, dialect); - result = 31 * result + Arrays.hashCode(fieldNames); - result = 31 * result + Arrays.hashCode(keyFields); - return result; - } - - /** Builder for {@link JdbcDmlOptions}. */ - public static class JdbcDmlOptionsBuilder extends JdbcUpdateQueryOptionsBuilder { - private String tableName; - private String[] fieldNames; - private String[] keyFields; - private JdbcDialect dialect; - - @Override - protected JdbcDmlOptionsBuilder self() { - return this; - } - - public JdbcDmlOptionsBuilder withFieldNames(String field, String... fieldNames) { - this.fieldNames = concat(field, fieldNames); - return this; - } - - public JdbcDmlOptionsBuilder withFieldNames(String[] fieldNames) { - this.fieldNames = fieldNames; - return this; - } - - public JdbcDmlOptionsBuilder withKeyFields(String keyField, String... keyFields) { - this.keyFields = concat(keyField, keyFields); - return this; - } - - public JdbcDmlOptionsBuilder withKeyFields(String[] keyFields) { - this.keyFields = keyFields; - return this; - } - - public JdbcDmlOptionsBuilder withTableName(String tableName) { - this.tableName = tableName; - return self(); - } - - public JdbcDmlOptionsBuilder withDialect(JdbcDialect dialect) { - this.dialect = dialect; - return self(); - } - - public JdbcDmlOptions build() { - return new JdbcDmlOptions(tableName, dialect, fieldNames, fieldTypes, keyFields); - } - - static String[] concat(String first, String... 
next) { - if (next == null || next.length == 0) { - return new String[] {first}; - } else { - return Stream.concat(Stream.of(new String[] {first}), Stream.of(next)) - .toArray(String[]::new); - } - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcInsertOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcInsertOptions.java deleted file mode 100644 index 76daf62751..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcInsertOptions.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import org.apache.flink.util.Preconditions; - -import java.util.stream.IntStream; - -/** JDBC sink insert options. */ -public class JdbcInsertOptions extends JdbcTypedQueryOptions { - - private static final long serialVersionUID = 1L; - - private final String query; - - public JdbcInsertOptions(String query, int[] typesArray) { - super(typesArray); - this.query = Preconditions.checkNotNull(query, "query is empty"); - } - - public String getQuery() { - return query; - } - - public static JdbcInsertOptions from(String query, int firstFieldType, int... nextFieldTypes) { - return new JdbcInsertOptions(query, concat(firstFieldType, nextFieldTypes)); - } - - private static int[] concat(int first, int... next) { - if (next == null || next.length == 0) { - return new int[] {first}; - } else { - return IntStream.concat(IntStream.of(new int[] {first}), IntStream.of(next)) - .toArray(); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcLookupOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcLookupOptions.java deleted file mode 100644 index d41d8600c6..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcLookupOptions.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import org.apache.flink.connector.phoenix.JdbcExecutionOptions; - -import java.io.Serializable; -import java.util.Objects; - -/** Options for the JDBC lookup. */ -public class JdbcLookupOptions implements Serializable { - - private final long cacheMaxSize; - private final long cacheExpireMs; - private final int maxRetryTimes; - - public JdbcLookupOptions(long cacheMaxSize, long cacheExpireMs, int maxRetryTimes) { - this.cacheMaxSize = cacheMaxSize; - this.cacheExpireMs = cacheExpireMs; - this.maxRetryTimes = maxRetryTimes; - } - - public long getCacheMaxSize() { - return cacheMaxSize; - } - - public long getCacheExpireMs() { - return cacheExpireMs; - } - - public int getMaxRetryTimes() { - return maxRetryTimes; - } - - public static Builder builder() { - return new Builder(); - } - - @Override - public boolean equals(Object o) { - if (o instanceof JdbcLookupOptions) { - JdbcLookupOptions options = (JdbcLookupOptions) o; - return Objects.equals(cacheMaxSize, options.cacheMaxSize) - && Objects.equals(cacheExpireMs, options.cacheExpireMs) - && Objects.equals(maxRetryTimes, options.maxRetryTimes); - } else { - return false; - } - } - - /** Builder of {@link JdbcLookupOptions}. */ - public static class Builder { - private long cacheMaxSize = -1L; - private long cacheExpireMs = -1L; - private int maxRetryTimes = JdbcExecutionOptions.DEFAULT_MAX_RETRY_TIMES; - - /** optional, lookup cache max size, over this value, the old data will be eliminated. */ - public Builder setCacheMaxSize(long cacheMaxSize) { - this.cacheMaxSize = cacheMaxSize; - return this; - } - - /** optional, lookup cache expire mills, over this time, the old data will expire. */ - public Builder setCacheExpireMs(long cacheExpireMs) { - this.cacheExpireMs = cacheExpireMs; - return this; - } - - /** optional, max retry times for jdbc connector. */ - public Builder setMaxRetryTimes(int maxRetryTimes) { - this.maxRetryTimes = maxRetryTimes; - return this; - } - - public JdbcLookupOptions build() { - return new JdbcLookupOptions(cacheMaxSize, cacheExpireMs, maxRetryTimes); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcOptions.java deleted file mode 100644 index 66bf1c4eda..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcOptions.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.connector.phoenix.JdbcConnectionOptions; -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.dialect.JdbcDialects; - -import java.util.Objects; -import java.util.Optional; - -import javax.annotation.Nullable; - -/** Options for the JDBC connector. */ -public class JdbcOptions extends JdbcConnectionOptions { - - private static final long serialVersionUID = 1L; - - private String tableName; - private JdbcDialect dialect; - private final @Nullable Integer parallelism; - protected boolean namespaceMappingEnabled; - protected boolean mapSystemTablesEnabled; - - protected JdbcOptions( - String dbURL, - String tableName, - String driverName, - String username, - String password, - JdbcDialect dialect, - Integer parallelism, - int connectionCheckTimeoutSeconds, - boolean namespaceMappingEnabled, - boolean mapSystemTablesEnabled) { - super(dbURL, driverName, username, password, connectionCheckTimeoutSeconds); - this.tableName = tableName; - this.dialect = dialect; - this.parallelism = parallelism; - this.namespaceMappingEnabled = namespaceMappingEnabled; - this.mapSystemTablesEnabled = mapSystemTablesEnabled; - } - - protected JdbcOptions( - String dbURL, - String tableName, - String driverName, - String username, - String password, - JdbcDialect dialect, - Integer parallelism, - int connectionCheckTimeoutSeconds) { - super(dbURL, driverName, username, password, connectionCheckTimeoutSeconds); - this.tableName = tableName; - this.dialect = dialect; - this.parallelism = parallelism; - } - - public String getTableName() { - return tableName; - } - - public JdbcDialect getDialect() { - return dialect; - } - - public Integer getParallelism() { - return parallelism; - } - - public boolean isNamespaceMappingEnabled() { - return namespaceMappingEnabled; - } - - public boolean isMapSystemTablesEnabled() { - return mapSystemTablesEnabled; - } - - public static Builder builder() { - return new Builder(); - } - - @Override - public boolean equals(Object o) { - if (o instanceof JdbcOptions) { - JdbcOptions options = (JdbcOptions) o; - return Objects.equals(url, options.url) - && Objects.equals(tableName, options.tableName) - && Objects.equals(driverName, options.driverName) - && Objects.equals(username, options.username) - && Objects.equals(password, options.password) - && Objects.equals( - dialect.getClass().getName(), - options.dialect.getClass().getName()) - && Objects.equals(parallelism, options.parallelism) - && Objects.equals(connectionCheckTimeoutSeconds, options.connectionCheckTimeoutSeconds) - && Objects.equals(namespaceMappingEnabled, options.namespaceMappingEnabled) - && Objects.equals(mapSystemTablesEnabled, options.mapSystemTablesEnabled); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - url, - tableName, - driverName, - username, - password, - dialect.getClass().getName(), - parallelism, - 
connectionCheckTimeoutSeconds, - namespaceMappingEnabled, - mapSystemTablesEnabled); - } - - /** Builder of {@link JdbcOptions}. */ - public static class Builder { - private String dbURL; - private String tableName; - private String driverName; - private String username; - private String password; - private JdbcDialect dialect; - private Integer parallelism; - private int connectionCheckTimeoutSeconds = 60; - protected boolean namespaceMappingEnabled; - protected boolean mapSystemTablesEnabled; - - /** required, table name. */ - public Builder setTableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** optional, user name. */ - public Builder setUsername(String username) { - this.username = username; - return this; - } - - /** optional, password. */ - public Builder setPassword(String password) { - this.password = password; - return this; - } - - /** optional, connectionCheckTimeoutSeconds. */ - public Builder setConnectionCheckTimeoutSeconds(int connectionCheckTimeoutSeconds) { - this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds; - return this; - } - - /** - * optional, driver name, dialect has a default driver name, See {@link - * JdbcDialect#defaultDriverName}. - */ - public Builder setDriverName(String driverName) { - this.driverName = driverName; - return this; - } - - /** required, JDBC DB url. */ - public Builder setDBUrl(String dbURL) { - this.dbURL = dbURL; - return this; - } - - /** - * optional, Handle the SQL dialect of jdbc driver. If not set, it will be infer by {@link - * JdbcDialects#get} from DB url. - */ - public Builder setDialect(JdbcDialect dialect) { - this.dialect = dialect; - return this; - } - - public Builder setParallelism(Integer parallelism) { - this.parallelism = parallelism; - return this; - } - - public Builder setNamespaceMappingEnabled(boolean namespaceMappingEnabled) { - this.namespaceMappingEnabled = namespaceMappingEnabled; - return this; - } - - public Builder setMapSystemTablesEnabled(boolean mapSystemTablesEnabled) { - this.mapSystemTablesEnabled = mapSystemTablesEnabled; - return this; - } - - public JdbcOptions build() { - checkNotNull(dbURL, "No dbURL supplied."); - checkNotNull(tableName, "No tableName supplied."); - if (this.dialect == null) { - Optional optional = JdbcDialects.get(dbURL); - this.dialect = optional.orElseGet(() -> { - throw new NullPointerException("Unknown dbURL,can not find proper dialect."); - }); - } - if (this.driverName == null) { - Optional optional = dialect.defaultDriverName(); - this.driverName = optional.orElseGet(() -> { - throw new NullPointerException("No driverName supplied."); - }); - } - - return new JdbcOptions( - dbURL, - tableName, - driverName, - username, - password, - dialect, - parallelism, - connectionCheckTimeoutSeconds, - namespaceMappingEnabled, - mapSystemTablesEnabled); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcReadOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcReadOptions.java deleted file mode 100644 index 8b10519c39..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcReadOptions.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import java.io.Serializable; -import java.util.Objects; -import java.util.Optional; - -/** Options for the JDBC scan. */ -public class JdbcReadOptions implements Serializable { - - private final String query; - private final String partitionColumnName; - private final Long partitionLowerBound; - private final Long partitionUpperBound; - private final Integer numPartitions; - - private final int fetchSize; - private final boolean autoCommit; - - private JdbcReadOptions( - String query, - String partitionColumnName, - Long partitionLowerBound, - Long partitionUpperBound, - Integer numPartitions, - int fetchSize, - boolean autoCommit) { - this.query = query; - this.partitionColumnName = partitionColumnName; - this.partitionLowerBound = partitionLowerBound; - this.partitionUpperBound = partitionUpperBound; - this.numPartitions = numPartitions; - - this.fetchSize = fetchSize; - this.autoCommit = autoCommit; - } - - public Optional<String> getQuery() { - return Optional.ofNullable(query); - } - - public Optional<String> getPartitionColumnName() { - return Optional.ofNullable(partitionColumnName); - } - - public Optional<Long> getPartitionLowerBound() { - return Optional.ofNullable(partitionLowerBound); - } - - public Optional<Long> getPartitionUpperBound() { - return Optional.ofNullable(partitionUpperBound); - } - - public Optional<Integer> getNumPartitions() { - return Optional.ofNullable(numPartitions); - } - - public int getFetchSize() { - return fetchSize; - } - - public boolean getAutoCommit() { - return autoCommit; - } - - public static Builder builder() { - return new Builder(); - } - - @Override - public boolean equals(Object o) { - if (o instanceof JdbcReadOptions) { - JdbcReadOptions options = (JdbcReadOptions) o; - return Objects.equals(query, options.query) - && Objects.equals(partitionColumnName, options.partitionColumnName) - && Objects.equals(partitionLowerBound, options.partitionLowerBound) - && Objects.equals(partitionUpperBound, options.partitionUpperBound) - && Objects.equals(numPartitions, options.numPartitions) - && Objects.equals(fetchSize, options.fetchSize) - && Objects.equals(autoCommit, options.autoCommit); - } else { - return false; - } - } - - /** Builder of {@link JdbcReadOptions}. */ - public static class Builder { - protected String query; - protected String partitionColumnName; - protected Long partitionLowerBound; - protected Long partitionUpperBound; - protected Integer numPartitions; - - protected int fetchSize = 0; - protected boolean autoCommit = true; - - /** optional, SQL query statement for this JDBC source. */ - public Builder setQuery(String query) { - this.query = query; - return this; - } - - /** optional, name of the column used for partitioning the input.
*/ - public Builder setPartitionColumnName(String partitionColumnName) { - this.partitionColumnName = partitionColumnName; - return this; - } - - /** optional, the smallest value of the first partition. */ - public Builder setPartitionLowerBound(long partitionLowerBound) { - this.partitionLowerBound = partitionLowerBound; - return this; - } - - /** optional, the largest value of the last partition. */ - public Builder setPartitionUpperBound(long partitionUpperBound) { - this.partitionUpperBound = partitionUpperBound; - return this; - } - - /** - * optional, the maximum number of partitions that can be used for parallelism in table - * reading. - */ - public Builder setNumPartitions(int numPartitions) { - this.numPartitions = numPartitions; - return this; - } - - /** - * optional, the number of rows to fetch per round trip. default value is 0, according to - * the jdbc api, 0 means that fetchSize hint will be ignored. - */ - public Builder setFetchSize(int fetchSize) { - this.fetchSize = fetchSize; - return this; - } - - /** optional, whether to set auto commit on the JDBC driver. */ - public Builder setAutoCommit(boolean autoCommit) { - this.autoCommit = autoCommit; - return this; - } - - public JdbcReadOptions build() { - return new JdbcReadOptions( - query, - partitionColumnName, - partitionLowerBound, - partitionUpperBound, - numPartitions, - fetchSize, - autoCommit); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcTypedQueryOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcTypedQueryOptions.java deleted file mode 100644 index 938bf0a617..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/JdbcTypedQueryOptions.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import java.io.Serializable; - -import javax.annotation.Nullable; - -/** Jdbc query type options. 
*/ -abstract class JdbcTypedQueryOptions implements Serializable { - - @Nullable - private final int[] fieldTypes; - - JdbcTypedQueryOptions(int[] fieldTypes) { - this.fieldTypes = fieldTypes; - } - - public int[] getFieldTypes() { - return fieldTypes; - } - - public abstract static class JdbcUpdateQueryOptionsBuilder<T extends JdbcUpdateQueryOptionsBuilder<T>> { - int[] fieldTypes; - - protected abstract T self(); - - public T withFieldTypes(int[] fieldTypes) { - this.fieldTypes = fieldTypes; - return self(); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcExecutionOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcExecutionOptions.java deleted file mode 100644 index 02be7fe324..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcExecutionOptions.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import org.apache.flink.util.Preconditions; - -import java.io.Serializable; -import java.util.Objects; - -public class PhoenixJdbcExecutionOptions implements Serializable { - public static final int DEFAULT_MAX_RETRY_TIMES = 3; - private static final int DEFAULT_INTERVAL_MILLIS = 0; - public static final int DEFAULT_SIZE = 5000; - private final long batchIntervalMs; - private final int batchSize; - private final int maxRetries; - - private PhoenixJdbcExecutionOptions(long batchIntervalMs, int batchSize, int maxRetries) { - Preconditions.checkArgument(maxRetries >= 0); - this.batchIntervalMs = batchIntervalMs; - this.batchSize = batchSize; - this.maxRetries = maxRetries; - } - - public long getBatchIntervalMs() { - return this.batchIntervalMs; - } - - public int getBatchSize() { - return this.batchSize; - } - - public int getMaxRetries() { - return this.maxRetries; - } - - public boolean equals(Object o) { - if (this == o) { - return true; - } else if (o != null && this.getClass() == o.getClass()) { - PhoenixJdbcExecutionOptions that = (PhoenixJdbcExecutionOptions) o; - return this.batchIntervalMs == that.batchIntervalMs - && this.batchSize == that.batchSize - && this.maxRetries == that.maxRetries; - } else { - return false; - } - } - - public int hashCode() { - return Objects.hash(new Object[] {this.batchIntervalMs, this.batchSize, this.maxRetries}); - } - - public static Builder builder() { - return new Builder(); - } - - public static PhoenixJdbcExecutionOptions defaults() { - return builder().build(); - } - - public static final class Builder { - private long intervalMs = 0L; - private int size = 5000; - private int maxRetries = 3; - - public Builder() {} - - public Builder withBatchSize(int size) { - this.size = size; - return this; - } - - public Builder withBatchIntervalMs(long intervalMs) { - this.intervalMs = intervalMs; - return this; - } - - public Builder withMaxRetries(int maxRetries) { - this.maxRetries = maxRetries; - return this; - } - - public PhoenixJdbcExecutionOptions build() { - return new PhoenixJdbcExecutionOptions(this.intervalMs, this.size, this.maxRetries); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcLookupOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcLookupOptions.java deleted file mode 100644 index 803ddcdfc1..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcLookupOptions.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import java.io.Serializable; -import java.util.Objects; - -public class PhoenixJdbcLookupOptions implements Serializable { - private final long cacheMaxSize; - private final long cacheExpireMs; - private final int maxRetryTimes; - - public PhoenixJdbcLookupOptions(long cacheMaxSize, long cacheExpireMs, int maxRetryTimes) { - this.cacheMaxSize = cacheMaxSize; - this.cacheExpireMs = cacheExpireMs; - this.maxRetryTimes = maxRetryTimes; - } - - public long getCacheMaxSize() { - return this.cacheMaxSize; - } - - public long getCacheExpireMs() { - return this.cacheExpireMs; - } - - public int getMaxRetryTimes() { - return this.maxRetryTimes; - } - - public static Builder builder() { - return new Builder(); - } - - public boolean equals(Object o) { - if (!(o instanceof PhoenixJdbcLookupOptions)) { - return false; - } else { - PhoenixJdbcLookupOptions options = (PhoenixJdbcLookupOptions) o; - return Objects.equals(this.cacheMaxSize, options.cacheMaxSize) - && Objects.equals(this.cacheExpireMs, options.cacheExpireMs) - && Objects.equals(this.maxRetryTimes, options.maxRetryTimes); - } - } - - public static class Builder { - private long cacheMaxSize = -1L; - private long cacheExpireMs = -1L; - private int maxRetryTimes = 3; - - public Builder() {} - - public Builder setCacheMaxSize(long cacheMaxSize) { - this.cacheMaxSize = cacheMaxSize; - return this; - } - - public Builder setCacheExpireMs(long cacheExpireMs) { - this.cacheExpireMs = cacheExpireMs; - return this; - } - - public Builder setMaxRetryTimes(int maxRetryTimes) { - this.maxRetryTimes = maxRetryTimes; - return this; - } - - public PhoenixJdbcLookupOptions build() { - return new PhoenixJdbcLookupOptions(this.cacheMaxSize, this.cacheExpireMs, this.maxRetryTimes); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcOptions.java deleted file mode 100644 index 63f2ee73a9..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcOptions.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import org.apache.flink.connector.phoenix.JdbcConnectionOptions; -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.dialect.JdbcDialects; -import org.apache.flink.util.Preconditions; - -import java.util.Objects; -import java.util.Optional; - -import javax.annotation.Nullable; - -/** - * PhoenixJdbcOptions - * - * @since 2022/3/17 9:57 - */ -public class PhoenixJdbcOptions extends JdbcConnectionOptions { - private static final long serialVersionUID = 1L; - private String tableName; - private JdbcDialect dialect; - - @Nullable - private final Integer parallelism; - // setting phoenix schema isEnabled - private Boolean isNamespaceMappingEnabled; - private Boolean mapSystemTablesToNamespace; - - private PhoenixJdbcOptions( - String dbURL, - String tableName, - String driverName, - String username, - String password, - JdbcDialect dialect, - Integer parallelism, - int connectionCheckTimeoutSeconds, - boolean isNamespaceMappingEnabled, - boolean mapSystemTablesToNamespace) { - super(dbURL, driverName, username, password, connectionCheckTimeoutSeconds); - this.tableName = tableName; - this.dialect = dialect; - this.parallelism = parallelism; - this.isNamespaceMappingEnabled = isNamespaceMappingEnabled; - this.mapSystemTablesToNamespace = mapSystemTablesToNamespace; - } - - public String getTableName() { - return this.tableName; - } - - public JdbcDialect getDialect() { - return this.dialect; - } - - public Integer getParallelism() { - return this.parallelism; - } - - public Boolean getNamespaceMappingEnabled() { - return isNamespaceMappingEnabled; - } - - public Boolean getMapSystemTablesToNamespace() { - return mapSystemTablesToNamespace; - } - - public static Builder builder() { - return new Builder(); - } - - public boolean equals(Object o) { - if (!(o instanceof PhoenixJdbcOptions)) { - return false; - } else { - PhoenixJdbcOptions options = (PhoenixJdbcOptions) o; - return Objects.equals(this.url, options.url) - && Objects.equals(this.tableName, options.tableName) - && Objects.equals(this.driverName, options.driverName) - && Objects.equals(this.username, options.username) - && Objects.equals(this.password, options.password) - && Objects.equals( - this.dialect.getClass().getName(), - options.dialect.getClass().getName()) - && Objects.equals(this.parallelism, options.parallelism) - && Objects.equals(this.connectionCheckTimeoutSeconds, options.connectionCheckTimeoutSeconds) - && Objects.equals(this.isNamespaceMappingEnabled, options.isNamespaceMappingEnabled) - && Objects.equals(this.mapSystemTablesToNamespace, options.mapSystemTablesToNamespace); - } - } - - public int hashCode() { - return Objects.hash(new Object[] { - this.url, - this.tableName, - this.driverName, - this.username, - this.password, - this.dialect.getClass().getName(), - this.parallelism, - this.connectionCheckTimeoutSeconds, - this.isNamespaceMappingEnabled, - this.mapSystemTablesToNamespace - }); - } - - public static class Builder { - private String dbURL; - private String tableName; - private String driverName; - private String username; - private String password; - private JdbcDialect dialect; - private Integer parallelism; - private int connectionCheckTimeoutSeconds = 60; - private Boolean isNamespaceMappingEnabled; - private Boolean mapSystemTablesToNamespace; - - public Builder() {} - - public Builder setTableName(String tableName) { - this.tableName = tableName; - return this; - } - - 
public Builder setUsername(String username) { - this.username = username; - return this; - } - - public Builder setPassword(String password) { - this.password = password; - return this; - } - - public Builder setConnectionCheckTimeoutSeconds(int connectionCheckTimeoutSeconds) { - this.connectionCheckTimeoutSeconds = connectionCheckTimeoutSeconds; - return this; - } - - public Builder setDriverName(String driverName) { - this.driverName = driverName; - return this; - } - - public Builder setDBUrl(String dbURL) { - this.dbURL = dbURL; - return this; - } - - public Builder setDialect(JdbcDialect dialect) { - this.dialect = dialect; - return this; - } - - public Builder setParallelism(Integer parallelism) { - this.parallelism = parallelism; - return this; - } - - public Builder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) { - this.isNamespaceMappingEnabled = namespaceMappingEnabled; - return this; - } - - public Builder setMapSystemTablesToNamespace(Boolean mapSystemTablesToNamespace) { - this.mapSystemTablesToNamespace = mapSystemTablesToNamespace; - return this; - } - - public PhoenixJdbcOptions build() { - Preconditions.checkNotNull(this.dbURL, "No dbURL supplied."); - Preconditions.checkNotNull(this.tableName, "No tableName supplied."); - Optional optional; - if (this.dialect == null) { - optional = JdbcDialects.get(this.dbURL); - this.dialect = (JdbcDialect) optional.orElseGet(() -> { - throw new NullPointerException("Unknown dbURL,can not find proper dialect."); - }); - } - - if (this.driverName == null) { - optional = this.dialect.defaultDriverName(); - this.driverName = (String) optional.orElseGet(() -> { - throw new NullPointerException("No driverName supplied."); - }); - } - - return new PhoenixJdbcOptions( - this.dbURL, - this.tableName, - this.driverName, - this.username, - this.password, - this.dialect, - this.parallelism, - this.connectionCheckTimeoutSeconds, - this.isNamespaceMappingEnabled, - this.mapSystemTablesToNamespace); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcReadOptions.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcReadOptions.java deleted file mode 100644 index a6d323236c..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/internal/options/PhoenixJdbcReadOptions.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.internal.options; - -import java.io.Serializable; -import java.util.Objects; -import java.util.Optional; - -public class PhoenixJdbcReadOptions implements Serializable { - - private final String query; - private final String partitionColumnName; - private final Long partitionLowerBound; - private final Long partitionUpperBound; - private final Integer numPartitions; - private final int fetchSize; - private final boolean autoCommit; - - private PhoenixJdbcReadOptions( - String query, - String partitionColumnName, - Long partitionLowerBound, - Long partitionUpperBound, - Integer numPartitions, - int fetchSize, - boolean autoCommit) { - this.query = query; - this.partitionColumnName = partitionColumnName; - this.partitionLowerBound = partitionLowerBound; - this.partitionUpperBound = partitionUpperBound; - this.numPartitions = numPartitions; - this.fetchSize = fetchSize; - this.autoCommit = autoCommit; - } - - public Optional getQuery() { - return Optional.ofNullable(this.query); - } - - public Optional getPartitionColumnName() { - return Optional.ofNullable(this.partitionColumnName); - } - - public Optional getPartitionLowerBound() { - return Optional.ofNullable(this.partitionLowerBound); - } - - public Optional getPartitionUpperBound() { - return Optional.ofNullable(this.partitionUpperBound); - } - - public Optional getNumPartitions() { - return Optional.ofNullable(this.numPartitions); - } - - public int getFetchSize() { - return this.fetchSize; - } - - public boolean getAutoCommit() { - return this.autoCommit; - } - - public static Builder builder() { - return new Builder(); - } - - public boolean equals(Object o) { - if (!(o instanceof JdbcReadOptions)) { - return false; - } else { - PhoenixJdbcReadOptions options = (PhoenixJdbcReadOptions) o; - return Objects.equals(this.query, options.query) - && Objects.equals(this.partitionColumnName, options.partitionColumnName) - && Objects.equals(this.partitionLowerBound, options.partitionLowerBound) - && Objects.equals(this.partitionUpperBound, options.partitionUpperBound) - && Objects.equals(this.numPartitions, options.numPartitions) - && Objects.equals(this.fetchSize, options.fetchSize) - && Objects.equals(this.autoCommit, options.autoCommit); - } - } - - public static class Builder { - protected String query; - protected String partitionColumnName; - protected Long partitionLowerBound; - protected Long partitionUpperBound; - protected Integer numPartitions; - protected int fetchSize = 0; - protected boolean autoCommit = true; - - public Builder() {} - - public Builder setQuery(String query) { - this.query = query; - return this; - } - - public Builder setPartitionColumnName(String partitionColumnName) { - this.partitionColumnName = partitionColumnName; - return this; - } - - public Builder setPartitionLowerBound(long partitionLowerBound) { - this.partitionLowerBound = partitionLowerBound; - return this; - } - - public Builder setPartitionUpperBound(long partitionUpperBound) { - this.partitionUpperBound = partitionUpperBound; - return this; - } - - public Builder setNumPartitions(int numPartitions) { - this.numPartitions = numPartitions; - return this; - } - - public Builder setFetchSize(int fetchSize) { - this.fetchSize = fetchSize; - return this; - } - - public Builder setAutoCommit(boolean autoCommit) { - this.autoCommit = autoCommit; - return this; - } - - public PhoenixJdbcReadOptions build() { - return new PhoenixJdbcReadOptions( - this.query, - this.partitionColumnName, - 
this.partitionLowerBound, - this.partitionUpperBound, - this.numPartitions, - this.fetchSize, - this.autoCommit); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcGenericParameterValuesProvider.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcGenericParameterValuesProvider.java deleted file mode 100644 index 3c588d588e..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcGenericParameterValuesProvider.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.split; - -import org.apache.flink.annotation.Experimental; -import org.apache.flink.connector.phoenix.PhoenixInputFormat; - -import java.io.Serializable; - -/** - * This splits generator actually does nothing but wrapping the query parameters computed by the - * user before creating the {@link PhoenixInputFormat} instance. - */ -@Experimental -public class JdbcGenericParameterValuesProvider implements JdbcParameterValuesProvider { - - private final Serializable[][] parameters; - - public JdbcGenericParameterValuesProvider(Serializable[][] parameters) { - this.parameters = parameters; - } - - @Override - public Serializable[][] getParameterValues() { - // do nothing...precomputed externally - return parameters; - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcNumericBetweenParametersProvider.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcNumericBetweenParametersProvider.java deleted file mode 100644 index 4d1a6eadbb..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcNumericBetweenParametersProvider.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.split; - -import org.apache.flink.annotation.Experimental; -import org.apache.flink.util.Preconditions; - -import java.io.Serializable; - -/** - * This query parameters generator is an helper class to parameterize from/to queries on a numeric - * column. The generated array of from/to values will be equally sized to fetchSize (apart from the - * last one), ranging from minVal up to maxVal. - * - *

- * For example, if there's a table BOOKS with a numeric PK id, using a
- * query like:
- *
- *   SELECT * FROM BOOKS WHERE id BETWEEN ? AND ?
- *
- *
You can take advantage of this class to automatically generate the parameters of the BETWEEN - * clause, based on the passed constructor parameters. - */ -@Experimental -public class JdbcNumericBetweenParametersProvider implements JdbcParameterValuesProvider { - - private final long minVal; - private final long maxVal; - - private long batchSize; - private int batchNum; - - /** - * NumericBetweenParametersProviderJdbc constructor. - * - * @param minVal the lower bound of the produced "from" values - * @param maxVal the upper bound of the produced "to" values - */ - public JdbcNumericBetweenParametersProvider(long minVal, long maxVal) { - Preconditions.checkArgument(minVal <= maxVal, "minVal must not be larger than maxVal"); - this.minVal = minVal; - this.maxVal = maxVal; - } - - /** - * NumericBetweenParametersProviderJdbc constructor. - * - * @param fetchSize the max distance between the produced from/to pairs - * @param minVal the lower bound of the produced "from" values - * @param maxVal the upper bound of the produced "to" values - */ - public JdbcNumericBetweenParametersProvider(long fetchSize, long minVal, long maxVal) { - Preconditions.checkArgument(minVal <= maxVal, "minVal must not be larger than maxVal"); - this.minVal = minVal; - this.maxVal = maxVal; - ofBatchSize(fetchSize); - } - - public JdbcNumericBetweenParametersProvider ofBatchSize(long batchSize) { - Preconditions.checkArgument(batchSize > 0, "Batch size must be positive"); - - long maxElemCount = (maxVal - minVal) + 1; - if (batchSize > maxElemCount) { - batchSize = maxElemCount; - } - this.batchSize = batchSize; - this.batchNum = new Double(Math.ceil((double) maxElemCount / batchSize)).intValue(); - return this; - } - - public JdbcNumericBetweenParametersProvider ofBatchNum(int batchNum) { - Preconditions.checkArgument(batchNum > 0, "Batch number must be positive"); - - long maxElemCount = (maxVal - minVal) + 1; - if (batchNum > maxElemCount) { - batchNum = (int) maxElemCount; - } - this.batchNum = batchNum; - this.batchSize = new Double(Math.ceil((double) maxElemCount / batchNum)).longValue(); - return this; - } - - @Override - public Serializable[][] getParameterValues() { - Preconditions.checkState( - batchSize > 0, - "Batch size and batch number must be positive. Have you called `ofBatchSize` or" + " `ofBatchNum`?"); - - long maxElemCount = (maxVal - minVal) + 1; - long bigBatchNum = maxElemCount - (batchSize - 1) * batchNum; - - Serializable[][] parameters = new Serializable[batchNum][2]; - long start = minVal; - for (int i = 0; i < batchNum; i++) { - long end = start + batchSize - 1 - (i >= bigBatchNum ? 1 : 0); - parameters[i] = new Long[] {start, end}; - start = end + 1; - } - return parameters; - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcParameterValuesProvider.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcParameterValuesProvider.java deleted file mode 100644 index d674f092d7..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/split/JdbcParameterValuesProvider.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
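To make the split arithmetic above concrete: with minVal=0, maxVal=9 and a batch size of 4, ofBatchSize computes ceil(10 / 4) = 3 batches, and getParameterValues then emits the ranges [0, 3], [4, 6], [7, 9]. A small sketch, assuming the deleted class were still on the classpath (values are illustrative):

    import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider;

    import java.io.Serializable;
    import java.util.Arrays;

    public class BetweenProviderSketch {
        public static void main(String[] args) {
            // 10 ids split into batches of at most 4 -> 3 from/to pairs.
            Serializable[][] splits =
                    new JdbcNumericBetweenParametersProvider(0, 9).ofBatchSize(4).getParameterValues();
            for (Serializable[] range : splits) {
                System.out.println(Arrays.toString(range)); // [0, 3], [4, 6], [7, 9]
            }
        }
    }
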
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.split; - -import org.apache.flink.annotation.Experimental; -import org.apache.flink.connector.phoenix.PhoenixInputFormat; - -import java.io.Serializable; - -/** - * This interface is used by the {@link PhoenixInputFormat} to compute the list of parallel query to - * run (i.e. splits). Each query will be parameterized using a row of the matrix provided by each - * {@link JdbcParameterValuesProvider} implementation. - */ -@Experimental -public interface JdbcParameterValuesProvider { - - /** Returns the necessary parameters array to use for query in parallel a table. */ - Serializable[][] getParameterValues(); -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatement.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatement.java deleted file mode 100644 index 8b1ac34cfb..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatement.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.statement; - -import java.math.BigDecimal; -import java.sql.Connection; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; - -/** - * This is a wrapper around {@link PreparedStatement} and allows the users to set parameters by name - * instead of by index. This allows users to use the same variable parameter multiple times in a - * statement. - * - *

- * Code such as this:
- *
- *   Connection con = getConnection();
- *   String query = "select * from my_table where first_name=? or last_name=?";
- *   PreparedStatement st = con.prepareStatement(query);
- *   st.setString(1, "bob");
- *   st.setString(2, "bob");
- *   ResultSet rs = st.executeQuery();
- *
- * Can be replaced with:
- *
- *   Connection con = getConnection();
- *   String query = "select * from my_table where first_name=:name or last_name=:name";
- *   FieldNamedPreparedStatement st = FieldNamedPreparedStatement.prepareStatement(con, query, new String[]{"name"});
- *   st.setString(0, "bob");
- *   ResultSet rs = st.executeQuery();
- * 
- */ -public interface FieldNamedPreparedStatement extends AutoCloseable { - - /** - * Creates a NamedPreparedStatement object for sending parameterized SQL statements - * to the database. - * - * @param connection the connection used to connect to database. - * @param sql an SQL statement that may contain one or more ':fieldName' as parameter - * placeholders - * @param fieldNames the field names in schema order used as the parameter names - */ - static FieldNamedPreparedStatement prepareStatement(Connection connection, String sql, String[] fieldNames) - throws SQLException { - return FieldNamedPreparedStatementImpl.prepareStatement(connection, sql, fieldNames); - } - - /** - * Clears the current parameter values immediately. - * - *

In general, parameter values remain in force for repeated use of a statement. Setting a - * parameter value automatically clears its previous value. However, in some cases it is useful - * to immediately release the resources used by the current parameter values; this can be done - * by calling the method clearParameters. - * - * @see PreparedStatement#clearParameters() - */ - void clearParameters() throws SQLException; - - /** - * Executes the SQL query in this NamedPreparedStatement object and returns the - * ResultSet object generated by the query. - * - * @see PreparedStatement#executeQuery() - */ - ResultSet executeQuery() throws SQLException; - - /** - * Adds a set of parameters to this NamedPreparedStatement object's batch of - * commands. - * - * @see PreparedStatement#addBatch() - */ - void addBatch() throws SQLException; - - /** - * Submits a batch of commands to the database for execution and if all commands execute - * successfully, returns an array of update counts. The int elements of the array - * that is returned are ordered to correspond to the commands in the batch, which are ordered - * according to the order in which they were added to the batch. - * - * @see PreparedStatement#executeBatch() - */ - int[] executeBatch() throws SQLException; - - /** - * Phoenix add Batch method - * - * @see PreparedStatement#executeBatch() - */ - void executeUpdate() throws SQLException; - - /** - * Sets the designated parameter to SQL NULL. - * - *

Note: You must specify the parameter's SQL type. - * - * @see PreparedStatement#setNull(int, int) - */ - void setNull(int fieldIndex, int sqlType) throws SQLException; - - /** - * Sets the designated parameter to the given Java boolean value. The driver - * converts this to an SQL BIT or BOOLEAN value when it sends it to - * the database. - * - * @see PreparedStatement#setBoolean(int, boolean) - */ - void setBoolean(int fieldIndex, boolean x) throws SQLException; - - /** - * Sets the designated parameter to the given Java byte value. The driver converts - * this to an SQL TINYINT value when it sends it to the database. - * - * @see PreparedStatement#setByte(int, byte) - */ - void setByte(int fieldIndex, byte x) throws SQLException; - - /** - * Sets the designated parameter to the given Java short value. The driver converts - * this to an SQL SMALLINT value when it sends it to the database. - * - * @see PreparedStatement#setShort(int, short) - */ - void setShort(int fieldIndex, short x) throws SQLException; - - /** - * Sets the designated parameter to the given Java int value. The driver converts - * this to an SQL INTEGER value when it sends it to the database. - * - * @see PreparedStatement#setInt(int, int) - */ - void setInt(int fieldIndex, int x) throws SQLException; - - /** - * Sets the designated parameter to the given Java long value. The driver converts - * this to an SQL BIGINT value when it sends it to the database. - * - * @see PreparedStatement#setLong(int, long) - */ - void setLong(int fieldIndex, long x) throws SQLException; - - /** - * Sets the designated parameter to the given Java float value. The driver converts - * this to an SQL REAL value when it sends it to the database. - * - * @see PreparedStatement#setFloat(int, float) - */ - void setFloat(int fieldIndex, float x) throws SQLException; - - /** - * Sets the designated parameter to the given Java double value. The driver - * converts this to an SQL DOUBLE value when it sends it to the database. - * - * @see PreparedStatement#setDouble(int, double) - */ - void setDouble(int fieldIndex, double x) throws SQLException; - - /** - * Sets the designated parameter to the given java.math.BigDecimal value. The - * driver converts this to an SQL NUMERIC value when it sends it to the database. - * - * @see PreparedStatement#setBigDecimal(int, BigDecimal) - */ - void setBigDecimal(int fieldIndex, BigDecimal x) throws SQLException; - - /** - * Sets the designated parameter to the given Java String value. The driver - * converts this to an SQL VARCHAR or LONGVARCHAR value (depending on - * the argument's size relative to the driver's limits on VARCHAR values) when it - * sends it to the database. - * - * @see PreparedStatement#setString(int, String) - */ - void setString(int fieldIndex, String x) throws SQLException; - - /** - * Sets the designated parameter to the given Java array of bytes. The driver converts this to - * an SQL VARBINARY or LONGVARBINARY (depending on the argument's size - * relative to the driver's limits on VARBINARY values) when it sends it to the - * database. - * - * @see PreparedStatement#setBytes(int, byte[]) - */ - void setBytes(int fieldIndex, byte[] x) throws SQLException; - - /** - * Sets the designated parameter to the given java.sql.Date value using the default - * time zone of the virtual machine that is running the application. The driver converts this to - * an SQL DATE value when it sends it to the database. 
- * - * @see PreparedStatement#setDate(int, Date) - */ - void setDate(int fieldIndex, Date x) throws SQLException; - - /** - * Sets the designated parameter to the given java.sql.Time value. The driver - * converts this to an SQL TIME value when it sends it to the database. - * - * @see PreparedStatement#setTime(int, Time) - */ - void setTime(int fieldIndex, Time x) throws SQLException; - - /** - * Sets the designated parameter to the given java.sql.Timestamp value. The driver - * converts this to an SQL TIMESTAMP value when it sends it to the database. - * - * @see PreparedStatement#setTimestamp(int, Timestamp) - */ - void setTimestamp(int fieldIndex, Timestamp x) throws SQLException; - - /** - * Sets the value of the designated parameter using the given object. - * - * @see PreparedStatement#setObject(int, Object) - */ - void setObject(int fieldIndex, Object x) throws SQLException; - - /** - * Releases this Statement object's database and JDBC resources immediately instead - * of waiting for this to happen when it is automatically closed. It is generally good practice - * to release resources as soon as you are finished with them to avoid tying up database - * resources. - * - * @see PreparedStatement#close() - */ - void close() throws SQLException; -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatementImpl.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatementImpl.java deleted file mode 100644 index e81c658b8f..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/FieldNamedPreparedStatementImpl.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.statement; - -import static org.apache.flink.util.Preconditions.checkArgument; -import static org.apache.flink.util.Preconditions.checkNotNull; - -import java.math.BigDecimal; -import java.sql.Connection; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** Simple implementation of {@link FieldNamedPreparedStatement}. 
*/ -public class FieldNamedPreparedStatementImpl implements FieldNamedPreparedStatement { - - private final PreparedStatement statement; - private final int[][] indexMapping; - - private FieldNamedPreparedStatementImpl(PreparedStatement statement, int[][] indexMapping) { - this.statement = statement; - this.indexMapping = indexMapping; - } - - @Override - public void clearParameters() throws SQLException { - statement.clearParameters(); - } - - @Override - public ResultSet executeQuery() throws SQLException { - return statement.executeQuery(); - } - - @Override - public void addBatch() throws SQLException { - statement.executeUpdate(); - } - - @Override - public void executeUpdate() throws SQLException { - statement.executeUpdate(); - } - - @Override - public int[] executeBatch() throws SQLException { - return statement.executeBatch(); - } - - @Override - public void setNull(int fieldIndex, int sqlType) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setNull(index, sqlType); - } - } - - @Override - public void setBoolean(int fieldIndex, boolean x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setBoolean(index, x); - } - } - - @Override - public void setByte(int fieldIndex, byte x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setByte(index, x); - } - } - - @Override - public void setShort(int fieldIndex, short x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setShort(index, x); - } - } - - @Override - public void setInt(int fieldIndex, int x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setInt(index, x); - } - } - - @Override - public void setLong(int fieldIndex, long x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setLong(index, x); - } - } - - @Override - public void setFloat(int fieldIndex, float x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setFloat(index, x); - } - } - - @Override - public void setDouble(int fieldIndex, double x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setDouble(index, x); - } - } - - @Override - public void setBigDecimal(int fieldIndex, BigDecimal x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setBigDecimal(index, x); - } - } - - @Override - public void setString(int fieldIndex, String x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setString(index, x); - } - } - - @Override - public void setBytes(int fieldIndex, byte[] x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setBytes(index, x); - } - } - - @Override - public void setDate(int fieldIndex, Date x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setDate(index, x); - } - } - - @Override - public void setTime(int fieldIndex, Time x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setTime(index, x); - } - } - - @Override - public void setTimestamp(int fieldIndex, Timestamp x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setTimestamp(index, x); - } - } - - @Override - public void setObject(int fieldIndex, Object x) throws SQLException { - for (int index : indexMapping[fieldIndex]) { - statement.setObject(index, x); - } - } - - @Override - public void close() throws SQLException { - statement.close(); - } - - // 
---------------------------------------------------------------------------------------- - - public static FieldNamedPreparedStatement prepareStatement(Connection connection, String sql, String[] fieldNames) - throws SQLException { - checkNotNull(connection, "connection must not be null."); - checkNotNull(sql, "sql must not be null."); - checkNotNull(fieldNames, "fieldNames must not be null."); - - if (sql.contains("?")) { - throw new IllegalArgumentException("SQL statement must not contain ? character."); - } - - HashMap> parameterMap = new HashMap<>(); - String parsedSQL = parseNamedStatement(sql, parameterMap); - // currently, the statements must contain all the field parameters - checkArgument(parameterMap.size() == fieldNames.length); - int[][] indexMapping = new int[fieldNames.length][]; - for (int i = 0; i < fieldNames.length; i++) { - String fieldName = fieldNames[i]; - checkArgument( - parameterMap.containsKey(fieldName), - fieldName + " doesn't exist in the parameters of SQL statement: " + sql); - indexMapping[i] = - parameterMap.get(fieldName).stream().mapToInt(v -> v).toArray(); - } - - return new FieldNamedPreparedStatementImpl(connection.prepareStatement(parsedSQL), indexMapping); - } - - /** - * Parses a sql with named parameters. The parameter-index mappings are put into the map, and - * the parsed sql is returned. - * - * @param sql sql to parse - * @param paramMap map to hold parameter-index mappings - * @return the parsed sql - */ - public static String parseNamedStatement(String sql, Map> paramMap) { - StringBuilder parsedSql = new StringBuilder(); - int fieldIndex = 1; // SQL statement parameter index starts from 1 - int length = sql.length(); - for (int i = 0; i < length; i++) { - char c = sql.charAt(i); - if (':' == c) { - int j = i + 1; - while (j < length && Character.isJavaIdentifierPart(sql.charAt(j))) { - j++; - } - String parameterName = sql.substring(i + 1, j); - checkArgument(!parameterName.isEmpty(), "Named parameters in SQL statement must not be empty."); - paramMap.computeIfAbsent(parameterName, n -> new ArrayList<>()).add(fieldIndex); - fieldIndex++; - i = j - 1; - parsedSql.append('?'); - } else { - parsedSql.append(c); - } - } - return parsedSql.toString(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/StatementFactory.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/StatementFactory.java deleted file mode 100644 index 0f61c65d5f..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/statement/StatementFactory.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
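The named-parameter parsing above is easiest to see with a concrete input: every ':identifier' token is replaced by '?', and paramMap records the 1-based positions at which each name occurs, so a repeated name maps to several indexes. A hedged sketch (the SQL text and class name are arbitrary examples):

    import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl;

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class NamedStatementParseSketch {
        public static void main(String[] args) {
            Map<String, List<Integer>> paramMap = new HashMap<>();
            String parsed = FieldNamedPreparedStatementImpl.parseNamedStatement(
                    "UPDATE t SET name = :name WHERE id = :id AND name <> :name", paramMap);
            System.out.println(parsed);   // UPDATE t SET name = ? WHERE id = ? AND name <> ?
            System.out.println(paramMap); // {name=[1, 3], id=[2]} (map iteration order may vary)
        }
    }
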
- * - */ - -package org.apache.flink.connector.phoenix.statement; - -import java.sql.Connection; -import java.sql.SQLException; - -/** A factory to create {@link FieldNamedPreparedStatement} with the given {@link Connection}. */ -public interface StatementFactory { - - /** Creates {@link FieldNamedPreparedStatement} with the given {@link Connection}. */ - FieldNamedPreparedStatement createStatement(Connection connection) throws SQLException; -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableFactory.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableFactory.java deleted file mode 100644 index 7bb299787d..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableFactory.java +++ /dev/null @@ -1,328 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.table; - -import org.apache.flink.configuration.ConfigOption; -import org.apache.flink.configuration.ConfigOptions; -import org.apache.flink.configuration.ReadableConfig; -import org.apache.flink.connector.phoenix.JdbcExecutionOptions; -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.dialect.JdbcDialects; -import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions; -import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.connector.sink.DynamicTableSink; -import org.apache.flink.table.connector.source.DynamicTableSource; -import org.apache.flink.table.factories.DynamicTableSinkFactory; -import org.apache.flink.table.factories.DynamicTableSourceFactory; -import org.apache.flink.table.factories.FactoryUtil; -import org.apache.flink.table.utils.TableSchemaUtils; -import org.apache.flink.util.Preconditions; - -import java.time.Duration; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Optional; -import java.util.Set; - -/** - * PhoenixDynamicTableFactory - * - * @since 2022/3/17 9:44 - */ -public class PhoenixDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory { - public static final String IDENTIFIER = "phoenix-jdbc"; - public static final ConfigOption URL = - ConfigOptions.key("url").stringType().noDefaultValue().withDescription("The JDBC database URL."); - public static final ConfigOption TABLE_NAME = - 
ConfigOptions.key("table-name").stringType().noDefaultValue().withDescription("The JDBC table name."); - public static final ConfigOption USERNAME = - ConfigOptions.key("username").stringType().noDefaultValue().withDescription("The JDBC user name."); - public static final ConfigOption PASSWORD = - ConfigOptions.key("password").stringType().noDefaultValue().withDescription("The JDBC password."); - private static final ConfigOption DRIVER = ConfigOptions.key("driver") - .stringType() - .noDefaultValue() - .withDescription("The class name of the JDBC driver to use to connect to this URL. If" - + " not set, it will automatically be derived from the URL."); - public static final ConfigOption MAX_RETRY_TIMEOUT = ConfigOptions.key("connection.max-retry-timeout") - .durationType() - .defaultValue(Duration.ofSeconds(60L)) - .withDescription("Maximum timeout between retries."); - private static final ConfigOption SCAN_PARTITION_COLUMN = ConfigOptions.key("scan.partition.column") - .stringType() - .noDefaultValue() - .withDescription("The column name used for partitioning the input."); - private static final ConfigOption SCAN_PARTITION_NUM = ConfigOptions.key("scan.partition.num") - .intType() - .noDefaultValue() - .withDescription("The number of partitions."); - private static final ConfigOption SCAN_PARTITION_LOWER_BOUND = ConfigOptions.key("scan.partition.lower-bound") - .longType() - .noDefaultValue() - .withDescription("The smallest value of the first partition."); - private static final ConfigOption SCAN_PARTITION_UPPER_BOUND = ConfigOptions.key("scan.partition.upper-bound") - .longType() - .noDefaultValue() - .withDescription("The largest value of the last partition."); - private static final ConfigOption SCAN_FETCH_SIZE = ConfigOptions.key("scan.fetch-size") - .intType() - .defaultValue(0) - .withDescription("Gives the reader a hint as to the number of rows that should be" - + " fetched from the database per round-trip when reading. If the" - + " value is zero, this hint is ignored."); - private static final ConfigOption SCAN_AUTO_COMMIT = ConfigOptions.key("scan.auto-commit") - .booleanType() - .defaultValue(true) - .withDescription("Sets whether the driver is in auto-commit mode."); - private static final ConfigOption LOOKUP_CACHE_MAX_ROWS = ConfigOptions.key("lookup.cache.max-rows") - .longType() - .defaultValue(-1L) - .withDescription("The max number of rows of lookup cache, over this value, the oldest" - + " rows will be eliminated. 
\"cache.max-rows\" and \"cache.ttl\"" - + " options must all be specified if any of them is specified."); - private static final ConfigOption LOOKUP_CACHE_TTL = ConfigOptions.key("lookup.cache.ttl") - .durationType() - .defaultValue(Duration.ofSeconds(10L)) - .withDescription("The cache time to live."); - private static final ConfigOption LOOKUP_MAX_RETRIES = ConfigOptions.key("lookup.max-retries") - .intType() - .defaultValue(3) - .withDescription("The max retry times if lookup database failed."); - private static final ConfigOption SINK_BUFFER_FLUSH_MAX_ROWS = ConfigOptions.key( - "sink.buffer-flush.max-rows") - .intType() - .defaultValue(100) - .withDescription("The flush max size (includes all append, upsert and delete records)," - + " over this number of records, will flush data."); - private static final ConfigOption SINK_BUFFER_FLUSH_INTERVAL = ConfigOptions.key( - "sink.buffer-flush.interval") - .durationType() - .defaultValue(Duration.ofSeconds(1L)) - .withDescription("The flush interval mills, over this time, asynchronous threads will" + " flush data."); - private static final ConfigOption SINK_MAX_RETRIES = ConfigOptions.key("sink.max-retries") - .intType() - .defaultValue(3) - .withDescription("The max retry times if writing records to database failed."); - - public static final ConfigOption SCHEMA_NAMESPACE_MAPPING_ENABLE = ConfigOptions.key( - "phoenix.schema.isnamespacemappingenabled") - .booleanType() - .defaultValue(false) - .withDescription("The JDBC phoenix Schema isNamespaceMappingEnabled."); - public static final ConfigOption SCHEMA_MAP_SYSTEMTABLE_ENABLE = ConfigOptions.key( - "phoenix.schema.mapsystemtablestonamespace") - .booleanType() - .defaultValue(false) - .withDescription("The JDBC phoenix mapSystemTablesToNamespace."); - - @Override - public DynamicTableSink createDynamicTableSink(Context context) { - FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); - ReadableConfig config = helper.getOptions(); - helper.validate(); - this.validateConfigOptions(config); - PhoenixJdbcOptions jdbcOptions = this.getJdbcOptions(config); - TableSchema physicalSchema = - TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema()); - return new PhoenixDynamicTableSink( - jdbcOptions, - this.getJdbcExecutionOptions(config), - this.getJdbcDmlOptions(jdbcOptions, physicalSchema), - physicalSchema); - } - - @Override - public DynamicTableSource createDynamicTableSource(Context context) { - FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); - ReadableConfig config = helper.getOptions(); - helper.validate(); - this.validateConfigOptions(config); - TableSchema physicalSchema = - TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema()); - return new PhoenixDynamicTableSource( - this.getJdbcOptions(helper.getOptions()), - this.getJdbcReadOptions(helper.getOptions()), - this.getJdbcLookupOptions(helper.getOptions()), - physicalSchema); - } - - private PhoenixJdbcOptions getJdbcOptions(ReadableConfig readableConfig) { - String url = (String) readableConfig.get(URL); - PhoenixJdbcOptions.Builder builder = PhoenixJdbcOptions.builder() - .setDBUrl(url) - .setTableName((String) readableConfig.get(TABLE_NAME)) - .setDialect((JdbcDialect) JdbcDialects.get(url).get()) - .setParallelism((Integer) - readableConfig.getOptional(FactoryUtil.SINK_PARALLELISM).orElse((Integer) null)) - .setConnectionCheckTimeoutSeconds((int) ((Duration) readableConfig.get(MAX_RETRY_TIMEOUT)).getSeconds()) - 
.setNamespaceMappingEnabled(readableConfig.get(SCHEMA_NAMESPACE_MAPPING_ENABLE)) - .setMapSystemTablesToNamespace(readableConfig.get(SCHEMA_MAP_SYSTEMTABLE_ENABLE)); - readableConfig.getOptional(DRIVER).ifPresent(builder::setDriverName); - readableConfig.getOptional(USERNAME).ifPresent(builder::setUsername); - readableConfig.getOptional(PASSWORD).ifPresent(builder::setPassword); - return builder.build(); - } - - private JdbcReadOptions getJdbcReadOptions(ReadableConfig readableConfig) { - Optional partitionColumnName = readableConfig.getOptional(SCAN_PARTITION_COLUMN); - JdbcReadOptions.Builder builder = JdbcReadOptions.builder(); - if (partitionColumnName.isPresent()) { - builder.setPartitionColumnName((String) partitionColumnName.get()); - builder.setPartitionLowerBound((Long) readableConfig.get(SCAN_PARTITION_LOWER_BOUND)); - builder.setPartitionUpperBound((Long) readableConfig.get(SCAN_PARTITION_UPPER_BOUND)); - builder.setNumPartitions((Integer) readableConfig.get(SCAN_PARTITION_NUM)); - } - - readableConfig.getOptional(SCAN_FETCH_SIZE).ifPresent(builder::setFetchSize); - builder.setAutoCommit((Boolean) readableConfig.get(SCAN_AUTO_COMMIT)); - return builder.build(); - } - - private JdbcLookupOptions getJdbcLookupOptions(ReadableConfig readableConfig) { - return new JdbcLookupOptions( - (Long) readableConfig.get(LOOKUP_CACHE_MAX_ROWS), - ((Duration) readableConfig.get(LOOKUP_CACHE_TTL)).toMillis(), - (Integer) readableConfig.get(LOOKUP_MAX_RETRIES)); - } - - private JdbcExecutionOptions getJdbcExecutionOptions(ReadableConfig config) { - JdbcExecutionOptions.Builder builder = new JdbcExecutionOptions.Builder(); - builder.withBatchSize((Integer) config.get(SINK_BUFFER_FLUSH_MAX_ROWS)); - builder.withBatchIntervalMs(((Duration) config.get(SINK_BUFFER_FLUSH_INTERVAL)).toMillis()); - builder.withMaxRetries((Integer) config.get(SINK_MAX_RETRIES)); - return builder.build(); - } - - private JdbcDmlOptions getJdbcDmlOptions(PhoenixJdbcOptions jdbcOptions, TableSchema schema) { - String[] keyFields = (String[]) schema.getPrimaryKey() - .map((pk) -> { - return (String[]) pk.getColumns().toArray(new String[0]); - }) - .orElse((String[]) null); - return JdbcDmlOptions.builder() - .withTableName(jdbcOptions.getTableName()) - .withDialect(jdbcOptions.getDialect()) - .withFieldNames(schema.getFieldNames()) - .withKeyFields(keyFields) - .build(); - } - - public String factoryIdentifier() { - return this.IDENTIFIER; - } - - public Set> requiredOptions() { - Set> requiredOptions = new HashSet(); - requiredOptions.add(URL); - requiredOptions.add(TABLE_NAME); - requiredOptions.add(SCHEMA_NAMESPACE_MAPPING_ENABLE); - requiredOptions.add(SCHEMA_MAP_SYSTEMTABLE_ENABLE); - return requiredOptions; - } - - public Set> optionalOptions() { - Set> optionalOptions = new HashSet(); - optionalOptions.add(DRIVER); - optionalOptions.add(USERNAME); - optionalOptions.add(PASSWORD); - optionalOptions.add(SCAN_PARTITION_COLUMN); - optionalOptions.add(SCAN_PARTITION_LOWER_BOUND); - optionalOptions.add(SCAN_PARTITION_UPPER_BOUND); - optionalOptions.add(SCAN_PARTITION_NUM); - optionalOptions.add(SCAN_FETCH_SIZE); - optionalOptions.add(SCAN_AUTO_COMMIT); - optionalOptions.add(LOOKUP_CACHE_MAX_ROWS); - optionalOptions.add(LOOKUP_CACHE_TTL); - optionalOptions.add(LOOKUP_MAX_RETRIES); - optionalOptions.add(SINK_BUFFER_FLUSH_MAX_ROWS); - optionalOptions.add(SINK_BUFFER_FLUSH_INTERVAL); - optionalOptions.add(SINK_MAX_RETRIES); - optionalOptions.add(FactoryUtil.SINK_PARALLELISM); - optionalOptions.add(MAX_RETRY_TIMEOUT); - // 
optionalOptions.add(SCHEMA_NAMESPACE_MAPPING_ENABLE); - // optionalOptions.add(SCHEMA_MAP_SYSTEMTABLE_ENABLE); - return optionalOptions; - } - - private void validateConfigOptions(ReadableConfig config) { - String jdbcUrl = (String) config.get(URL); - Optional dialect = JdbcDialects.get(jdbcUrl); - Preconditions.checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + jdbcUrl); - this.checkAllOrNone( - config, new ConfigOption[] {SCHEMA_NAMESPACE_MAPPING_ENABLE, SCHEMA_MAP_SYSTEMTABLE_ENABLE}); - this.checkAllOrNone(config, new ConfigOption[] {USERNAME, PASSWORD}); - this.checkAllOrNone(config, new ConfigOption[] { - SCAN_PARTITION_COLUMN, SCAN_PARTITION_NUM, SCAN_PARTITION_LOWER_BOUND, SCAN_PARTITION_UPPER_BOUND - }); - if (config.getOptional(SCAN_PARTITION_LOWER_BOUND).isPresent() - && config.getOptional(SCAN_PARTITION_UPPER_BOUND).isPresent()) { - long lowerBound = (Long) config.get(SCAN_PARTITION_LOWER_BOUND); - long upperBound = (Long) config.get(SCAN_PARTITION_UPPER_BOUND); - if (lowerBound > upperBound) { - throw new IllegalArgumentException(String.format( - "'%s'='%s' must not be larger than '%s'='%s'.", - SCAN_PARTITION_LOWER_BOUND.key(), lowerBound, SCAN_PARTITION_UPPER_BOUND.key(), upperBound)); - } - } - - this.checkAllOrNone(config, new ConfigOption[] {LOOKUP_CACHE_MAX_ROWS, LOOKUP_CACHE_TTL}); - if ((Integer) config.get(LOOKUP_MAX_RETRIES) < 0) { - throw new IllegalArgumentException(String.format( - "The value of '%s' option shouldn't be negative, but is %s.", - LOOKUP_MAX_RETRIES.key(), config.get(LOOKUP_MAX_RETRIES))); - } else if ((Integer) config.get(SINK_MAX_RETRIES) < 0) { - throw new IllegalArgumentException(String.format( - "The value of '%s' option shouldn't be negative, but is %s.", - SINK_MAX_RETRIES.key(), config.get(SINK_MAX_RETRIES))); - } else if (((Duration) config.get(MAX_RETRY_TIMEOUT)).getSeconds() <= 0L) { - throw new IllegalArgumentException(String.format( - "The value of '%s' option must be in second granularity and shouldn't" - + " be smaller than 1 second, but is %s.", - MAX_RETRY_TIMEOUT.key(), - config.get(ConfigOptions.key(MAX_RETRY_TIMEOUT.key()) - .stringType() - .noDefaultValue()))); - } - } - - private void checkAllOrNone(ReadableConfig config, ConfigOption[] configOptions) { - int presentCount = 0; - ConfigOption[] var4 = configOptions; - int var5 = configOptions.length; - - for (int var6 = 0; var6 < var5; ++var6) { - ConfigOption configOption = var4[var6]; - if (config.getOptional(configOption).isPresent()) { - ++presentCount; - } - } - - String[] propertyNames = - (String[]) Arrays.stream(configOptions).map(ConfigOption::key).toArray((x$0) -> { - return new String[x$0]; - }); - Preconditions.checkArgument( - configOptions.length == presentCount || presentCount == 0, - "Either all or none of the following options should be provided:\n" + String.join("\n", propertyNames)); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSink.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSink.java deleted file mode 100644 index d3bf9f6620..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSink.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
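For context, the factory deleted above registered the identifier 'phoenix-jdbc' and consumed the option keys shown from table DDL. A hedged sketch of how a table would have been declared against it (the URL, table and column names, and schema flags are placeholders, not values taken from this repository):

    import org.apache.flink.table.api.EnvironmentSettings;
    import org.apache.flink.table.api.TableEnvironment;

    public class PhoenixDdlSketch {
        public static void main(String[] args) {
            TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
            // Option keys match PhoenixDynamicTableFactory above; every value is a placeholder.
            tEnv.executeSql("CREATE TABLE book_sink (\n"
                    + "  id BIGINT,\n"
                    + "  title STRING,\n"
                    + "  PRIMARY KEY (id) NOT ENFORCED\n"
                    + ") WITH (\n"
                    + "  'connector' = 'phoenix-jdbc',\n"
                    + "  'url' = 'jdbc:phoenix:zookeeper-host:2181',\n"
                    + "  'table-name' = 'TEST.BOOK',\n"
                    + "  'phoenix.schema.isnamespacemappingenabled' = 'true',\n"
                    + "  'phoenix.schema.mapsystemtablestonamespace' = 'true'\n"
                    + ")");
        }
    }
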
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.table; - -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.connector.phoenix.JdbcExecutionOptions; -import org.apache.flink.connector.phoenix.internal.GenericJdbcSinkFunction; -import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions; -import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.connector.ChangelogMode; -import org.apache.flink.table.connector.sink.DynamicTableSink; -import org.apache.flink.table.connector.sink.SinkFunctionProvider; -import org.apache.flink.table.data.RowData; -import org.apache.flink.types.RowKind; -import org.apache.flink.util.Preconditions; - -import java.util.Objects; - -/** - * PhoenixDynamicTableSink - * - * @since 2022/3/17 11:39 - */ -public class PhoenixDynamicTableSink implements DynamicTableSink { - private final PhoenixJdbcOptions jdbcOptions; - private final JdbcExecutionOptions executionOptions; - private final JdbcDmlOptions dmlOptions; - private final TableSchema tableSchema; - private final String dialectName; - - public PhoenixDynamicTableSink( - PhoenixJdbcOptions jdbcOptions, - JdbcExecutionOptions executionOptions, - JdbcDmlOptions dmlOptions, - TableSchema tableSchema) { - this.jdbcOptions = jdbcOptions; - this.executionOptions = executionOptions; - this.dmlOptions = dmlOptions; - this.tableSchema = tableSchema; - this.dialectName = dmlOptions.getDialect().dialectName(); - } - - @Override - public ChangelogMode getChangelogMode(ChangelogMode requestedMode) { - this.validatePrimaryKey(requestedMode); - return ChangelogMode.newBuilder() - .addContainedKind(RowKind.INSERT) - .addContainedKind(RowKind.DELETE) - .addContainedKind(RowKind.UPDATE_AFTER) - .build(); - } - - private void validatePrimaryKey(ChangelogMode requestedMode) { - Preconditions.checkState( - ChangelogMode.insertOnly().equals(requestedMode) - || this.dmlOptions.getKeyFields().isPresent(), - "please declare primary key for sink table when query contains update/delete" + " record."); - } - - @Override - public SinkRuntimeProvider getSinkRuntimeProvider(Context context) { - TypeInformation rowDataTypeInformation = - context.createTypeInformation(this.tableSchema.toRowDataType()); - PhoenixJdbcDynamicOutputFormatBuilder builder = new PhoenixJdbcDynamicOutputFormatBuilder(); - builder.setJdbcOptions(this.jdbcOptions); - builder.setJdbcDmlOptions(this.dmlOptions); - builder.setJdbcExecutionOptions(this.executionOptions); - builder.setRowDataTypeInfo(rowDataTypeInformation); - builder.setFieldDataTypes(this.tableSchema.getFieldDataTypes()); - return SinkFunctionProvider.of(new GenericJdbcSinkFunction(builder.build()), this.jdbcOptions.getParallelism()); - } - - @Override - public 
DynamicTableSink copy() { - return new PhoenixDynamicTableSink(this.jdbcOptions, this.executionOptions, this.dmlOptions, this.tableSchema); - } - - @Override - public String asSummaryString() { - return "Phoenix Table Sink "; - } - - public boolean equals(Object o) { - if (this == o) { - return true; - } else if (!(o instanceof PhoenixDynamicTableSink)) { - return false; - } else { - PhoenixDynamicTableSink that = (PhoenixDynamicTableSink) o; - return Objects.equals(this.jdbcOptions, that.jdbcOptions) - && Objects.equals(this.executionOptions, that.executionOptions) - && Objects.equals(this.dmlOptions, that.dmlOptions) - && Objects.equals(this.tableSchema, that.tableSchema) - && Objects.equals(this.dialectName, that.dialectName); - } - } - - public int hashCode() { - return Objects.hash(new Object[] { - this.jdbcOptions, this.executionOptions, this.dmlOptions, this.tableSchema, this.dialectName - }); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSource.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSource.java deleted file mode 100644 index f8bb6c5e68..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixDynamicTableSource.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
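The sink above intentionally excludes UPDATE_BEFORE from its changelog mode: once validatePrimaryKey has ensured a primary key exists for updating or deleting queries, Phoenix upserts and deletes are keyed on that primary key, so the before-image of an update adds nothing. A small sketch of the accepted mode, assuming only the ChangelogMode/RowKind API already used above:

import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.types.RowKind;

final class UpsertModeSketch {
    // The changelog accepted by the Phoenix sink: inserts, deletes and update after-images only.
    static final ChangelogMode UPSERT_MODE = ChangelogMode.newBuilder()
            .addContainedKind(RowKind.INSERT)
            .addContainedKind(RowKind.UPDATE_AFTER)
            .addContainedKind(RowKind.DELETE)
            .build();
}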
- * - */ - -package org.apache.flink.connector.phoenix.table; - -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions; -import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions; -import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.connector.ChangelogMode; -import org.apache.flink.table.connector.source.DynamicTableSource; -import org.apache.flink.table.connector.source.InputFormatProvider; -import org.apache.flink.table.connector.source.LookupTableSource; -import org.apache.flink.table.connector.source.ScanTableSource; -import org.apache.flink.table.connector.source.TableFunctionProvider; -import org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown; -import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown; -import org.apache.flink.table.types.logical.RowType; -import org.apache.flink.table.utils.TableSchemaUtils; -import org.apache.flink.util.Preconditions; - -import java.util.Objects; - -/** - * PhoenixDynamicTableSource - * - * @since 2022/3/17 10:40 - */ -public class PhoenixDynamicTableSource - implements ScanTableSource, LookupTableSource, SupportsProjectionPushDown, SupportsLimitPushDown { - - private final PhoenixJdbcOptions options; - private final JdbcReadOptions readOptions; - private final JdbcLookupOptions lookupOptions; - private TableSchema physicalSchema; - private final String dialectName; - private long limit = -1L; - - public PhoenixDynamicTableSource( - PhoenixJdbcOptions options, - JdbcReadOptions readOptions, - JdbcLookupOptions lookupOptions, - TableSchema physicalSchema) { - this.options = options; - this.readOptions = readOptions; - this.lookupOptions = lookupOptions; - this.physicalSchema = physicalSchema; - this.dialectName = options.getDialect().dialectName(); - } - - @Override - public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) { - // JDBC only support non-nested look up keys - String[] keyNames = new String[context.getKeys().length]; - for (int i = 0; i < keyNames.length; i++) { - int[] innerKeyArr = context.getKeys()[i]; - Preconditions.checkArgument(innerKeyArr.length == 1, "JDBC only support non-nested look up keys"); - keyNames[i] = physicalSchema.getFieldNames()[innerKeyArr[0]]; - } - final RowType rowType = (RowType) physicalSchema.toRowDataType().getLogicalType(); - - return TableFunctionProvider.of(new PhoenixRowDataLookupFunction( - options, - lookupOptions, - physicalSchema.getFieldNames(), - physicalSchema.getFieldDataTypes(), - keyNames, - rowType)); - } - - @Override - public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) { - PhoenixJdbcRowDataInputFormat.Builder builder = PhoenixJdbcRowDataInputFormat.builder() - .setDrivername(this.options.getDriverName()) - .setDBUrl(this.options.getDbURL()) - .setUsername((String) this.options.getUsername().orElse((String) null)) - .setPassword((String) this.options.getPassword().orElse((String) null)) - .setAutoCommit(this.readOptions.getAutoCommit()) - // setting phoenix schema - .setNamespaceMappingEnabled(this.options.getNamespaceMappingEnabled()) - .setMapSystemTablesToNamespace(this.options.getMapSystemTablesToNamespace()); - - if (this.readOptions.getFetchSize() != 0) { - 
builder.setFetchSize(this.readOptions.getFetchSize()); - } - - JdbcDialect dialect = this.options.getDialect(); - String query = dialect.getSelectFromStatement( - this.options.getTableName(), this.physicalSchema.getFieldNames(), new String[0]); - if (this.readOptions.getPartitionColumnName().isPresent()) { - long lowerBound = (Long) this.readOptions.getPartitionLowerBound().get(); - long upperBound = (Long) this.readOptions.getPartitionUpperBound().get(); - int numPartitions = (Integer) this.readOptions.getNumPartitions().get(); - builder.setParametersProvider( - (new JdbcNumericBetweenParametersProvider(lowerBound, upperBound)).ofBatchNum(numPartitions)); - query = query - + " WHERE " - + dialect.quoteIdentifier( - (String) this.readOptions.getPartitionColumnName().get()) - + " BETWEEN ? AND ?"; - } - - if (this.limit >= 0L) { - query = String.format("%s %s", query, dialect.getLimitClause(this.limit)); - } - - builder.setQuery(query); - RowType rowType = (RowType) this.physicalSchema.toRowDataType().getLogicalType(); - builder.setRowConverter(dialect.getRowConverter(rowType)); - builder.setRowDataTypeInfo(runtimeProviderContext.createTypeInformation(this.physicalSchema.toRowDataType())); - return InputFormatProvider.of(builder.build()); - } - - @Override - public ChangelogMode getChangelogMode() { - return ChangelogMode.insertOnly(); - } - - @Override - public boolean supportsNestedProjection() { - return false; - } - - @Override - public void applyProjection(int[][] projectedFields) { - this.physicalSchema = TableSchemaUtils.projectSchema(this.physicalSchema, projectedFields); - } - - public DynamicTableSource copy() { - return new PhoenixDynamicTableSource(this.options, this.readOptions, this.lookupOptions, this.physicalSchema); - } - - public String asSummaryString() { - return "JDBC:" + this.dialectName; - } - - public boolean equals(Object o) { - if (this == o) { - return true; - } else if (!(o instanceof PhoenixDynamicTableSource)) { - return false; - } else { - PhoenixDynamicTableSource that = (PhoenixDynamicTableSource) o; - return Objects.equals(this.options, that.options) - && Objects.equals(this.physicalSchema, that.physicalSchema) - && Objects.equals(this.dialectName, that.dialectName) - && Objects.equals(this.limit, that.limit); - } - } - - public int hashCode() { - return Objects.hash(new Object[] { - this.options, this.readOptions, this.lookupOptions, this.physicalSchema, this.dialectName, this.limit - }); - } - - public void applyLimit(long limit) { - this.limit = limit; - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcDynamicOutputFormatBuilder.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcDynamicOutputFormatBuilder.java deleted file mode 100644 index 27984b447c..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcDynamicOutputFormatBuilder.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
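getScanRuntimeProvider above turns the scan-partition options into a parallel read: the dialect's SELECT gains a "WHERE <partition-column> BETWEEN ? AND ?" suffix, and JdbcNumericBetweenParametersProvider(lowerBound, upperBound).ofBatchNum(numPartitions) supplies one bound pair per input split. The arithmetic below is an illustrative sketch of such a split, not necessarily the exact rounding the Flink provider applies.

final class BetweenSplitSketch {
    // Derive per-split bounds for "col BETWEEN ? AND ?" from a numeric range.
    static long[][] split(long lowerBound, long upperBound, int numPartitions) {
        long count = upperBound - lowerBound + 1;
        long batchSize = (count + numPartitions - 1) / numPartitions; // ceiling division
        long[][] parameters = new long[numPartitions][2];
        long start = lowerBound;
        for (int i = 0; i < numPartitions; i++) {
            long end = Math.min(start + batchSize - 1, upperBound);
            parameters[i] = new long[] {start, end};
            start = end + 1;
        }
        return parameters;
    }

    public static void main(String[] args) {
        // SCAN_PARTITION_LOWER_BOUND = 1, SCAN_PARTITION_UPPER_BOUND = 100, SCAN_PARTITION_NUM = 4
        for (long[] bounds : split(1, 100, 4)) {
            System.out.println(bounds[0] + " .. " + bounds[1]); // 1..25, 26..50, 51..75, 76..100
        }
    }
}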
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.table; - -import static org.apache.flink.table.data.RowData.createFieldGetter; -import static org.apache.flink.util.Preconditions.checkArgument; -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.api.common.functions.RuntimeContext; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.common.typeutils.TypeSerializer; -import org.apache.flink.connector.phoenix.JdbcExecutionOptions; -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat; -import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter; -import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor; -import org.apache.flink.connector.phoenix.internal.executor.TableBufferReducedStatementExecutor; -import org.apache.flink.connector.phoenix.internal.executor.TableBufferedStatementExecutor; -import org.apache.flink.connector.phoenix.internal.executor.TableInsertOrUpdateStatementExecutor; -import org.apache.flink.connector.phoenix.internal.executor.TableSimpleStatementExecutor; -import org.apache.flink.connector.phoenix.internal.options.JdbcDmlOptions; -import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement; -import org.apache.flink.table.data.GenericRowData; -import org.apache.flink.table.data.RowData; -import org.apache.flink.table.types.DataType; -import org.apache.flink.table.types.logical.LogicalType; -import org.apache.flink.table.types.logical.RowType; - -import java.io.Serializable; -import java.util.Arrays; -import java.util.function.Function; - -/** - * PhoenixJdbcDynamicOutputFormatBuilder - * - * @since 2022/3/17 11:43J - */ -public class PhoenixJdbcDynamicOutputFormatBuilder implements Serializable { - private static final long serialVersionUID = 1L; - - private PhoenixJdbcOptions jdbcOptions; - private JdbcExecutionOptions executionOptions; - private JdbcDmlOptions dmlOptions; - private TypeInformation rowDataTypeInformation; - private DataType[] fieldDataTypes; - - public PhoenixJdbcDynamicOutputFormatBuilder() {} - - public PhoenixJdbcDynamicOutputFormatBuilder setJdbcOptions(PhoenixJdbcOptions jdbcOptions) { - this.jdbcOptions = jdbcOptions; - return this; - } - - public PhoenixJdbcDynamicOutputFormatBuilder setJdbcExecutionOptions(JdbcExecutionOptions executionOptions) { - this.executionOptions = executionOptions; - return this; - } - - public PhoenixJdbcDynamicOutputFormatBuilder setJdbcDmlOptions(JdbcDmlOptions dmlOptions) { - this.dmlOptions = dmlOptions; - return this; - } - - public PhoenixJdbcDynamicOutputFormatBuilder setRowDataTypeInfo(TypeInformation rowDataTypeInfo) { - this.rowDataTypeInformation = rowDataTypeInfo; - return this; - } - - public PhoenixJdbcDynamicOutputFormatBuilder setFieldDataTypes(DataType[] fieldDataTypes) { - 
this.fieldDataTypes = fieldDataTypes; - return this; - } - - public JdbcBatchingOutputFormat build() { - checkNotNull(jdbcOptions, "jdbc options can not be null"); - checkNotNull(dmlOptions, "jdbc dml options can not be null"); - checkNotNull(executionOptions, "jdbc execution options can not be null"); - - final LogicalType[] logicalTypes = - Arrays.stream(fieldDataTypes).map(DataType::getLogicalType).toArray(LogicalType[]::new); - if (dmlOptions.getKeyFields().isPresent() && dmlOptions.getKeyFields().get().length > 0) { - // upsert query - return new JdbcBatchingOutputFormat<>( - new PhoneixJdbcConnectionProvider( - jdbcOptions, - jdbcOptions.getNamespaceMappingEnabled(), - jdbcOptions.getMapSystemTablesToNamespace()), - executionOptions, - ctx -> createBufferReduceExecutor(dmlOptions, ctx, rowDataTypeInformation, logicalTypes), - JdbcBatchingOutputFormat.RecordExtractor.identity()); - } else { - // append only query - final String sql = dmlOptions - .getDialect() - .getInsertIntoStatement(dmlOptions.getTableName(), dmlOptions.getFieldNames()); - return new JdbcBatchingOutputFormat<>( - new PhoneixJdbcConnectionProvider( - jdbcOptions, - jdbcOptions.getNamespaceMappingEnabled(), - jdbcOptions.getMapSystemTablesToNamespace()), - executionOptions, - ctx -> createSimpleBufferedExecutor( - ctx, - dmlOptions.getDialect(), - dmlOptions.getFieldNames(), - logicalTypes, - sql, - rowDataTypeInformation), - JdbcBatchingOutputFormat.RecordExtractor.identity()); - } - } - - private static JdbcBatchStatementExecutor createBufferReduceExecutor( - JdbcDmlOptions opt, - RuntimeContext ctx, - TypeInformation rowDataTypeInfo, - LogicalType[] fieldTypes) { - checkArgument(opt.getKeyFields().isPresent()); - JdbcDialect dialect = opt.getDialect(); - String tableName = opt.getTableName(); - String[] pkNames = opt.getKeyFields().get(); - int[] pkFields = Arrays.stream(pkNames) - .mapToInt(Arrays.asList(opt.getFieldNames())::indexOf) - .toArray(); - LogicalType[] pkTypes = - Arrays.stream(pkFields).mapToObj(f -> fieldTypes[f]).toArray(LogicalType[]::new); - final TypeSerializer typeSerializer = rowDataTypeInfo.createSerializer(ctx.getExecutionConfig()); - final Function valueTransform = - ctx.getExecutionConfig().isObjectReuseEnabled() ? typeSerializer::copy : Function.identity(); - - return new TableBufferReducedStatementExecutor( - createUpsertRowExecutor( - dialect, tableName, opt.getFieldNames(), fieldTypes, pkFields, pkNames, pkTypes), - createDeleteExecutor(dialect, tableName, pkNames, pkTypes), - createRowKeyExtractor(fieldTypes, pkFields), - valueTransform); - } - - private static JdbcBatchStatementExecutor createSimpleBufferedExecutor( - RuntimeContext ctx, - JdbcDialect dialect, - String[] fieldNames, - LogicalType[] fieldTypes, - String sql, - TypeInformation rowDataTypeInfo) { - final TypeSerializer typeSerializer = rowDataTypeInfo.createSerializer(ctx.getExecutionConfig()); - return new TableBufferedStatementExecutor( - createSimpleRowExecutor(dialect, fieldNames, fieldTypes, sql), - ctx.getExecutionConfig().isObjectReuseEnabled() ? 
typeSerializer::copy : Function.identity()); - } - - private static JdbcBatchStatementExecutor createUpsertRowExecutor( - JdbcDialect dialect, - String tableName, - String[] fieldNames, - LogicalType[] fieldTypes, - int[] pkFields, - String[] pkNames, - LogicalType[] pkTypes) { - return dialect.getUpsertStatement(tableName, fieldNames, pkNames) - .map(sql -> createSimpleRowExecutor(dialect, fieldNames, fieldTypes, sql)) - .orElseGet(() -> createInsertOrUpdateExecutor( - dialect, tableName, fieldNames, fieldTypes, pkFields, pkNames, pkTypes)); - } - - private static JdbcBatchStatementExecutor createDeleteExecutor( - JdbcDialect dialect, String tableName, String[] pkNames, LogicalType[] pkTypes) { - String deleteSql = dialect.getDeleteStatement(tableName, pkNames); - return createSimpleRowExecutor(dialect, pkNames, pkTypes, deleteSql); - } - - private static JdbcBatchStatementExecutor createSimpleRowExecutor( - JdbcDialect dialect, String[] fieldNames, LogicalType[] fieldTypes, final String sql) { - final JdbcRowConverter rowConverter = dialect.getRowConverter(RowType.of(fieldTypes)); - return new TableSimpleStatementExecutor( - connection -> FieldNamedPreparedStatement.prepareStatement(connection, sql, fieldNames), rowConverter); - } - - private static JdbcBatchStatementExecutor createInsertOrUpdateExecutor( - JdbcDialect dialect, - String tableName, - String[] fieldNames, - LogicalType[] fieldTypes, - int[] pkFields, - String[] pkNames, - LogicalType[] pkTypes) { - final String existStmt = dialect.getRowExistsStatement(tableName, pkNames); - final String insertStmt = dialect.getInsertIntoStatement(tableName, fieldNames); - final String updateStmt = dialect.getUpdateStatement(tableName, fieldNames, pkNames); - return new TableInsertOrUpdateStatementExecutor( - connection -> FieldNamedPreparedStatement.prepareStatement(connection, existStmt, pkNames), - connection -> FieldNamedPreparedStatement.prepareStatement(connection, insertStmt, fieldNames), - connection -> FieldNamedPreparedStatement.prepareStatement(connection, updateStmt, fieldNames), - dialect.getRowConverter(RowType.of(pkTypes)), - dialect.getRowConverter(RowType.of(fieldTypes)), - dialect.getRowConverter(RowType.of(fieldTypes)), - createRowKeyExtractor(fieldTypes, pkFields)); - } - - private static Function createRowKeyExtractor(LogicalType[] logicalTypes, int[] pkFields) { - final RowData.FieldGetter[] fieldGetters = new RowData.FieldGetter[pkFields.length]; - for (int i = 0; i < pkFields.length; i++) { - fieldGetters[i] = createFieldGetter(logicalTypes[pkFields[i]], pkFields[i]); - } - return row -> getPrimaryKey(row, fieldGetters); - } - - private static RowData getPrimaryKey(RowData row, RowData.FieldGetter[] fieldGetters) { - GenericRowData pkRow = new GenericRowData(fieldGetters.length); - for (int i = 0; i < fieldGetters.length; i++) { - pkRow.setField(i, fieldGetters[i].getFieldOrNull(row)); - } - return pkRow; - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcRowDataInputFormat.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcRowDataInputFormat.java deleted file mode 100644 index 64f60a3264..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcRowDataInputFormat.java +++ /dev/null @@ -1,375 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor 
license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.table; - -import org.apache.flink.api.common.io.DefaultInputSplitAssigner; -import org.apache.flink.api.common.io.RichInputFormat; -import org.apache.flink.api.common.io.statistics.BaseStatistics; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.typeutils.ResultTypeQueryable; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.connector.phoenix.JdbcConnectionOptions; -import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter; -import org.apache.flink.connector.phoenix.split.JdbcParameterValuesProvider; -import org.apache.flink.core.io.GenericInputSplit; -import org.apache.flink.core.io.InputSplit; -import org.apache.flink.core.io.InputSplitAssigner; -import org.apache.flink.table.data.RowData; -import org.apache.flink.util.Preconditions; - -import java.io.IOException; -import java.math.BigDecimal; -import java.sql.Array; -import java.sql.Connection; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Arrays; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * PhoenixJdbcRowDataInputFormat - * - * @since 2022/3/17 10:53 - */ -public class PhoenixJdbcRowDataInputFormat extends RichInputFormat - implements ResultTypeQueryable { - private static final long serialVersionUID = 2L; - private static final Logger LOG = LoggerFactory.getLogger(PhoenixJdbcRowDataInputFormat.class); - private JdbcConnectionProvider connectionProvider; - private int fetchSize; - private Boolean autoCommit; - private Object[][] parameterValues; - private String queryTemplate; - private int resultSetType; - private int resultSetConcurrency; - private JdbcRowConverter rowConverter; - private TypeInformation rowDataTypeInfo; - private transient PreparedStatement statement; - private transient ResultSet resultSet; - private transient boolean hasNext; - private boolean namespaceMappingEnabled; - private boolean mapSystemTablesEnabled; - - private PhoenixJdbcRowDataInputFormat( - JdbcConnectionProvider connectionProvider, - int fetchSize, - Boolean autoCommit, - Object[][] parameterValues, - String queryTemplate, - int resultSetType, - int resultSetConcurrency, - JdbcRowConverter rowConverter, - TypeInformation rowDataTypeInfo, - boolean namespaceMappingEnabled, - boolean mapSystemTablesEnabled) { - this.connectionProvider = connectionProvider; - this.fetchSize = fetchSize; - this.autoCommit = autoCommit; - this.parameterValues = parameterValues; - 
this.queryTemplate = queryTemplate; - this.resultSetType = resultSetType; - this.resultSetConcurrency = resultSetConcurrency; - this.rowConverter = rowConverter; - this.rowDataTypeInfo = rowDataTypeInfo; - this.namespaceMappingEnabled = namespaceMappingEnabled; - this.mapSystemTablesEnabled = mapSystemTablesEnabled; - } - - public void configure(Configuration parameters) {} - - public void openInputFormat() { - try { - Connection dbConn = this.connectionProvider.getOrEstablishConnection(); - if (this.autoCommit != null) { - dbConn.setAutoCommit(this.autoCommit); - } - - this.statement = dbConn.prepareStatement(this.queryTemplate, this.resultSetType, this.resultSetConcurrency); - if (this.fetchSize == -2147483648 || this.fetchSize > 0) { - this.statement.setFetchSize(this.fetchSize); - } - - } catch (SQLException var2) { - throw new IllegalArgumentException("open() failed." + var2.getMessage(), var2); - } catch (ClassNotFoundException var3) { - throw new IllegalArgumentException("JDBC-Class not found. - " + var3.getMessage(), var3); - } - } - - public void closeInputFormat() { - try { - if (this.statement != null) { - this.statement.close(); - } - } catch (SQLException var5) { - LOG.info("Inputformat Statement couldn't be closed - " + var5.getMessage()); - } finally { - this.statement = null; - } - - this.connectionProvider.closeConnection(); - this.parameterValues = (Object[][]) null; - } - - public void open(InputSplit inputSplit) throws IOException { - try { - if (inputSplit != null && this.parameterValues != null) { - for (int i = 0; i < this.parameterValues[inputSplit.getSplitNumber()].length; ++i) { - Object param = this.parameterValues[inputSplit.getSplitNumber()][i]; - if (param instanceof String) { - this.statement.setString(i + 1, (String) param); - } else if (param instanceof Long) { - this.statement.setLong(i + 1, (Long) param); - } else if (param instanceof Integer) { - this.statement.setInt(i + 1, (Integer) param); - } else if (param instanceof Double) { - this.statement.setDouble(i + 1, (Double) param); - } else if (param instanceof Boolean) { - this.statement.setBoolean(i + 1, (Boolean) param); - } else if (param instanceof Float) { - this.statement.setFloat(i + 1, (Float) param); - } else if (param instanceof BigDecimal) { - this.statement.setBigDecimal(i + 1, (BigDecimal) param); - } else if (param instanceof Byte) { - this.statement.setByte(i + 1, (Byte) param); - } else if (param instanceof Short) { - this.statement.setShort(i + 1, (Short) param); - } else if (param instanceof Date) { - this.statement.setDate(i + 1, (Date) param); - } else if (param instanceof Time) { - this.statement.setTime(i + 1, (Time) param); - } else if (param instanceof Timestamp) { - this.statement.setTimestamp(i + 1, (Timestamp) param); - } else { - if (!(param instanceof Array)) { - throw new IllegalArgumentException("open() failed. Parameter " - + i - + " of type " - + param.getClass() - + " is not handled (yet)."); - } - - this.statement.setArray(i + 1, (Array) param); - } - } - - if (LOG.isDebugEnabled()) { - LOG.debug(String.format( - "Executing '%s' with parameters %s", - this.queryTemplate, - Arrays.deepToString(this.parameterValues[inputSplit.getSplitNumber()]))); - } - } - - this.resultSet = this.statement.executeQuery(); - this.hasNext = this.resultSet.next(); - } catch (SQLException var4) { - throw new IllegalArgumentException("open() failed." 
+ var4.getMessage(), var4); - } - } - - public void close() throws IOException { - if (this.resultSet != null) { - try { - this.resultSet.close(); - } catch (SQLException var2) { - LOG.info("Inputformat ResultSet couldn't be closed - " + var2.getMessage()); - } - } - } - - public TypeInformation getProducedType() { - return this.rowDataTypeInfo; - } - - public boolean reachedEnd() throws IOException { - return !this.hasNext; - } - - public RowData nextRecord(RowData reuse) throws IOException { - try { - if (!this.hasNext) { - return null; - } else { - RowData row = this.rowConverter.toInternal(this.resultSet); - this.hasNext = this.resultSet.next(); - return row; - } - } catch (SQLException var3) { - throw new IOException("Couldn't read data - " + var3.getMessage(), var3); - } catch (NullPointerException var4) { - throw new IOException("Couldn't access resultSet", var4); - } - } - - public BaseStatistics getStatistics(BaseStatistics cachedStatistics) throws IOException { - return cachedStatistics; - } - - public InputSplit[] createInputSplits(int minNumSplits) throws IOException { - if (this.parameterValues == null) { - return new GenericInputSplit[] {new GenericInputSplit(0, 1)}; - } else { - GenericInputSplit[] ret = new GenericInputSplit[this.parameterValues.length]; - - for (int i = 0; i < ret.length; ++i) { - ret[i] = new GenericInputSplit(i, ret.length); - } - - return ret; - } - } - - public InputSplitAssigner getInputSplitAssigner(InputSplit[] inputSplits) { - return new DefaultInputSplitAssigner(inputSplits); - } - - public static Builder builder() { - return new Builder(); - } - - public static class Builder { - private JdbcConnectionOptions.JdbcConnectionOptionsBuilder connOptionsBuilder = - new JdbcConnectionOptions.JdbcConnectionOptionsBuilder(); - private int fetchSize; - private Boolean autoCommit; - private Object[][] parameterValues; - private String queryTemplate; - private JdbcRowConverter rowConverter; - private TypeInformation rowDataTypeInfo; - private int resultSetType = 1003; - private int resultSetConcurrency = 1007; - private boolean namespaceMappingEnabled; - private boolean mapSystemTablesEnabled; - - public Builder() {} - - public Builder setDrivername(String drivername) { - this.connOptionsBuilder.withDriverName(drivername); - return this; - } - - public Builder setDBUrl(String dbURL) { - this.connOptionsBuilder.withUrl(dbURL); - return this; - } - - public Builder setUsername(String username) { - this.connOptionsBuilder.withUsername(username); - return this; - } - - public Builder setPassword(String password) { - this.connOptionsBuilder.withPassword(password); - return this; - } - - public Builder setQuery(String query) { - this.queryTemplate = query; - return this; - } - - public Builder setParametersProvider(JdbcParameterValuesProvider parameterValuesProvider) { - this.parameterValues = parameterValuesProvider.getParameterValues(); - return this; - } - - public Builder setRowDataTypeInfo(TypeInformation rowDataTypeInfo) { - this.rowDataTypeInfo = rowDataTypeInfo; - return this; - } - - public Builder setRowConverter(JdbcRowConverter rowConverter) { - this.rowConverter = rowConverter; - return this; - } - - public Builder setFetchSize(int fetchSize) { - Preconditions.checkArgument( - fetchSize == -2147483648 || fetchSize > 0, - "Illegal value %s for fetchSize, has to be positive or Integer.MIN_VALUE.", - new Object[] {fetchSize}); - this.fetchSize = fetchSize; - return this; - } - - public Builder setAutoCommit(boolean autoCommit) { - this.autoCommit = 
autoCommit; - return this; - } - - public Builder setResultSetType(int resultSetType) { - this.resultSetType = resultSetType; - return this; - } - - public Builder setResultSetConcurrency(int resultSetConcurrency) { - this.resultSetConcurrency = resultSetConcurrency; - return this; - } - - public Builder setNamespaceMappingEnabled(Boolean namespaceMappingEnabled) { - this.namespaceMappingEnabled = namespaceMappingEnabled; - return this; - } - - public Builder setMapSystemTablesToNamespace(Boolean mapSystemTablesEnabled) { - this.mapSystemTablesEnabled = mapSystemTablesEnabled; - return this; - } - - public PhoenixJdbcRowDataInputFormat build() { - if (this.queryTemplate == null) { - throw new NullPointerException("No query supplied"); - } else if (this.rowConverter == null) { - throw new NullPointerException("No row converter supplied"); - } else { - if (this.parameterValues == null) { - PhoenixJdbcRowDataInputFormat.LOG.debug( - "No input splitting configured (data will be read with parallelism" + " 1)."); - } - - return new PhoenixJdbcRowDataInputFormat( - new PhoneixJdbcConnectionProvider( - this.connOptionsBuilder.build(), - this.namespaceMappingEnabled, - this.mapSystemTablesEnabled), - this.fetchSize, - this.autoCommit, - this.parameterValues, - this.queryTemplate, - this.resultSetType, - this.resultSetConcurrency, - this.rowConverter, - this.rowDataTypeInfo, - this.namespaceMappingEnabled, - this.mapSystemTablesEnabled); - } - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcSinkFunction.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcSinkFunction.java deleted file mode 100644 index ebb728b267..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixJdbcSinkFunction.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
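The decompiled defaults in the Builder above (resultSetType = 1003, resultSetConcurrency = 1007) and the fetchSize check against -2147483648 are plain JDBC constants rather than connector-specific magic numbers:

import java.sql.ResultSet;

final class ResultSetConstantsNote {
    public static void main(String[] args) {
        System.out.println(ResultSet.TYPE_FORWARD_ONLY); // 1003, the Builder's default resultSetType
        System.out.println(ResultSet.CONCUR_READ_ONLY);  // 1007, the Builder's default resultSetConcurrency
        System.out.println(Integer.MIN_VALUE);           // -2147483648, the only non-positive fetchSize accepted
    }
}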
- * - */ - -package org.apache.flink.connector.phoenix.table; - -import org.apache.flink.runtime.state.FunctionInitializationContext; -import org.apache.flink.runtime.state.FunctionSnapshotContext; -import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; -import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; - -/** - * PhoenixJdbcSinkFunction - * - * @since 2022/3/17 17:41 - */ -public class PhoenixJdbcSinkFunction extends RichSinkFunction implements CheckpointedFunction { - - @Override - public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception {} - - @Override - public void initializeState(FunctionInitializationContext functionInitializationContext) throws Exception {} - - @Override - public void invoke(T value) throws Exception {} - - @Override - public void invoke(T value, Context context) throws Exception {} -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixLookupFunction.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixLookupFunction.java deleted file mode 100644 index fd8e3b214d..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixLookupFunction.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.connector.phoenix.table; - -import static org.apache.flink.connector.phoenix.utils.JdbcUtils.getFieldFromResultSet; -import static org.apache.flink.util.Preconditions.checkArgument; -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.annotation.VisibleForTesting; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcOptions; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl; -import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil; -import org.apache.flink.connector.phoenix.utils.JdbcUtils; -import org.apache.flink.shaded.guava30.com.google.common.cache.Cache; -import org.apache.flink.shaded.guava30.com.google.common.cache.CacheBuilder; -import org.apache.flink.table.functions.FunctionContext; -import org.apache.flink.table.functions.TableFunction; -import org.apache.flink.types.Row; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A {@link TableFunction} to query fields from JDBC by keys. The query template like: - * - *
<pre> - * SELECT c, d, e, f from T where a = ? and b = ? - * </pre> - * - * <p>
Support cache the result to avoid frequent accessing to remote databases. 1.The cacheMaxSize - * is -1 means not use cache. 2.For real-time data, you need to set the TTL of cache. - */ -public class PhoenixLookupFunction extends TableFunction { - - private static final Logger LOG = LoggerFactory.getLogger(PhoenixLookupFunction.class); - private static final long serialVersionUID = 2L; - - private final String query; - private final JdbcConnectionProvider connectionProvider; - private final TypeInformation[] keyTypes; - private final int[] keySqlTypes; - private final String[] fieldNames; - private final String[] keyNames; - private final TypeInformation[] fieldTypes; - private final int[] outputSqlTypes; - private final long cacheMaxSize; - private final long cacheExpireMs; - private final int maxRetryTimes; - - private transient PreparedStatement statement; - private transient Cache> cache; - - public PhoenixLookupFunction( - JdbcOptions options, - JdbcLookupOptions lookupOptions, - String[] fieldNames, - TypeInformation[] fieldTypes, - String[] keyNames) { - this.connectionProvider = new PhoneixJdbcConnectionProvider(options); - this.fieldNames = fieldNames; - this.fieldTypes = fieldTypes; - this.keyNames = keyNames; - List nameList = Arrays.asList(fieldNames); - this.keyTypes = Arrays.stream(keyNames) - .map(s -> { - checkArgument(nameList.contains(s), "keyName %s can't find in fieldNames %s.", s, nameList); - return fieldTypes[nameList.indexOf(s)]; - }) - .toArray(TypeInformation[]::new); - this.cacheMaxSize = lookupOptions.getCacheMaxSize(); - this.cacheExpireMs = lookupOptions.getCacheExpireMs(); - this.maxRetryTimes = lookupOptions.getMaxRetryTimes(); - this.keySqlTypes = Arrays.stream(keyTypes) - .mapToInt(JdbcTypeUtil::typeInformationToSqlType) - .toArray(); - this.outputSqlTypes = Arrays.stream(fieldTypes) - .mapToInt(JdbcTypeUtil::typeInformationToSqlType) - .toArray(); - this.query = FieldNamedPreparedStatementImpl.parseNamedStatement( - options.getDialect().getSelectFromStatement(options.getTableName(), fieldNames, keyNames), - new HashMap<>()); - } - - public static Builder builder() { - return new Builder(); - } - - @Override - public void open(FunctionContext context) throws Exception { - try { - establishConnectionAndStatement(); - this.cache = cacheMaxSize == -1 || cacheExpireMs == -1 - ? null - : CacheBuilder.newBuilder() - .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS) - .maximumSize(cacheMaxSize) - .build(); - } catch (SQLException sqe) { - throw new IllegalArgumentException("open() failed.", sqe); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("JDBC driver class not found.", cnfe); - } - } - - public void eval(Object... 
keys) { - Row keyRow = Row.of(keys); - if (cache != null) { - List cachedRows = cache.getIfPresent(keyRow); - if (cachedRows != null) { - for (Row cachedRow : cachedRows) { - collect(cachedRow); - } - return; - } - } - - for (int retry = 0; retry <= maxRetryTimes; retry++) { - try { - statement.clearParameters(); - for (int i = 0; i < keys.length; i++) { - JdbcUtils.setField(statement, keySqlTypes[i], keys[i], i); - } - try (ResultSet resultSet = statement.executeQuery()) { - if (cache == null) { - while (resultSet.next()) { - collect(convertToRowFromResultSet(resultSet)); - } - } else { - ArrayList rows = new ArrayList<>(); - while (resultSet.next()) { - Row row = convertToRowFromResultSet(resultSet); - rows.add(row); - collect(row); - } - rows.trimToSize(); - cache.put(keyRow, rows); - } - } - break; - } catch (SQLException e) { - LOG.error(String.format("JDBC executeBatch error, retry times = %d", retry), e); - if (retry >= maxRetryTimes) { - throw new RuntimeException("Execution of JDBC statement failed.", e); - } - - try { - if (!connectionProvider.isConnectionValid()) { - statement.close(); - connectionProvider.closeConnection(); - establishConnectionAndStatement(); - } - } catch (SQLException | ClassNotFoundException excpetion) { - LOG.error("JDBC connection is not valid, and reestablish connection failed", excpetion); - throw new RuntimeException("Reestablish JDBC connection failed", excpetion); - } - - try { - Thread.sleep(1000 * retry); - } catch (InterruptedException e1) { - throw new RuntimeException(e1); - } - } - } - } - - private Row convertToRowFromResultSet(ResultSet resultSet) throws SQLException { - Row row = new Row(outputSqlTypes.length); - for (int i = 0; i < outputSqlTypes.length; i++) { - row.setField(i, getFieldFromResultSet(i, outputSqlTypes[i], resultSet)); - } - return row; - } - - private void establishConnectionAndStatement() throws SQLException, ClassNotFoundException { - Connection dbConn = connectionProvider.getOrEstablishConnection(); - statement = dbConn.prepareStatement(query); - } - - @Override - public void close() throws IOException { - if (cache != null) { - cache.cleanUp(); - cache = null; - } - if (statement != null) { - try { - statement.close(); - } catch (SQLException e) { - LOG.info("JDBC statement could not be closed: " + e.getMessage()); - } finally { - statement = null; - } - } - - connectionProvider.closeConnection(); - } - - @VisibleForTesting - public Connection getDbConnection() { - return connectionProvider.getConnection(); - } - - @Override - public TypeInformation getResultType() { - return new RowTypeInfo(fieldTypes, fieldNames); - } - - @Override - public TypeInformation[] getParameterTypes(Class[] signature) { - return keyTypes; - } - - /** Builder for a {@link PhoenixLookupFunction}. */ - public static class Builder { - private JdbcOptions options; - private JdbcLookupOptions lookupOptions; - protected String[] fieldNames; - protected TypeInformation[] fieldTypes; - protected String[] keyNames; - - /** required, jdbc options. */ - public Builder setOptions(JdbcOptions options) { - this.options = options; - return this; - } - - /** optional, lookup related options. */ - public Builder setLookupOptions(JdbcLookupOptions lookupOptions) { - this.lookupOptions = lookupOptions; - return this; - } - - /** required, field names of this jdbc table. */ - public Builder setFieldNames(String[] fieldNames) { - this.fieldNames = fieldNames; - return this; - } - - /** required, field types of this jdbc table. 
*/ - public Builder setFieldTypes(TypeInformation[] fieldTypes) { - this.fieldTypes = fieldTypes; - return this; - } - - /** required, key names to query this jdbc table. */ - public Builder setKeyNames(String[] keyNames) { - this.keyNames = keyNames; - return this; - } - - /** - * Finalizes the configuration and checks validity. - * - * @return Configured JdbcLookupFunction - */ - public PhoenixLookupFunction build() { - checkNotNull(options, "No JdbcOptions supplied."); - if (lookupOptions == null) { - lookupOptions = JdbcLookupOptions.builder().build(); - } - checkNotNull(fieldNames, "No fieldNames supplied."); - checkNotNull(fieldTypes, "No fieldTypes supplied."); - checkNotNull(keyNames, "No keyNames supplied."); - - return new PhoenixLookupFunction(options, lookupOptions, fieldNames, fieldTypes, keyNames); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixRowDataLookupFunction.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixRowDataLookupFunction.java deleted file mode 100644 index 11386b2d5f..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixRowDataLookupFunction.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
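The lookup function above and its RowData-based twin that follows share one runtime pattern: an optional Guava cache keyed by the lookup row, disabled when either cacheMaxSize or cacheExpireMs is -1, plus a retry loop with linear backoff that re-opens the JDBC connection only when it reports itself invalid. A condensed sketch; runQuery and reconnect are illustrative placeholders for the statement execution and connection re-establishment done in eval() above.

import java.sql.SQLException;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.flink.shaded.guava30.com.google.common.cache.Cache;
import org.apache.flink.shaded.guava30.com.google.common.cache.CacheBuilder;
import org.apache.flink.types.Row;

final class LookupRuntimeSketch {

    // Same guard as open() above: -1 for either setting disables caching entirely.
    static Cache<Row, List<Row>> buildCache(long cacheMaxSize, long cacheExpireMs) {
        return (cacheMaxSize == -1 || cacheExpireMs == -1)
                ? null
                : CacheBuilder.newBuilder()
                        .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS)
                        .maximumSize(cacheMaxSize)
                        .build();
    }

    // Condensed form of the eval() retry loop: linear backoff, reconnect only on an invalid connection.
    static void queryWithRetry(int maxRetryTimes) throws InterruptedException {
        for (int retry = 0; retry <= maxRetryTimes; retry++) {
            try {
                runQuery(); // placeholder for statement.executeQuery() plus collect()/cache.put()
                return;
            } catch (SQLException e) {
                if (retry >= maxRetryTimes) {
                    throw new RuntimeException("Execution of JDBC statement failed.", e);
                }
                reconnect(); // placeholder for the isConnectionValid()/re-open handling above
                Thread.sleep(1000L * retry);
            }
        }
    }

    private static void runQuery() throws SQLException {}

    private static void reconnect() {}
}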
- * - */ - -package org.apache.flink.connector.phoenix.table; - -import static org.apache.flink.util.Preconditions.checkArgument; -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.annotation.VisibleForTesting; -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.dialect.JdbcDialects; -import org.apache.flink.connector.phoenix.internal.connection.JdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.connection.PhoneixJdbcConnectionProvider; -import org.apache.flink.connector.phoenix.internal.converter.JdbcRowConverter; -import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions; -import org.apache.flink.connector.phoenix.internal.options.PhoenixJdbcOptions; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatement; -import org.apache.flink.shaded.guava30.com.google.common.cache.Cache; -import org.apache.flink.shaded.guava30.com.google.common.cache.CacheBuilder; -import org.apache.flink.table.data.GenericRowData; -import org.apache.flink.table.data.RowData; -import org.apache.flink.table.functions.FunctionContext; -import org.apache.flink.table.functions.TableFunction; -import org.apache.flink.table.types.DataType; -import org.apache.flink.table.types.logical.LogicalType; -import org.apache.flink.table.types.logical.RowType; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** A lookup function for {@link PhoenixRowDataLookupFunction}. 
*/ -@Internal -public class PhoenixRowDataLookupFunction extends TableFunction { - - private static final Logger LOG = LoggerFactory.getLogger(PhoenixRowDataLookupFunction.class); - private static final long serialVersionUID = 2L; - - private final String query; - private final JdbcConnectionProvider connectionProvider; - private final DataType[] keyTypes; - private final String[] keyNames; - private final long cacheMaxSize; - private final long cacheExpireMs; - private final int maxRetryTimes; - private final JdbcDialect jdbcDialect; - private final JdbcRowConverter jdbcRowConverter; - private final JdbcRowConverter lookupKeyRowConverter; - - private transient FieldNamedPreparedStatement statement; - private transient Cache> cache; - - public PhoenixRowDataLookupFunction( - PhoenixJdbcOptions options, - JdbcLookupOptions lookupOptions, - String[] fieldNames, - DataType[] fieldTypes, - String[] keyNames, - RowType rowType) { - checkNotNull(options, "No JdbcOptions supplied."); - checkNotNull(fieldNames, "No fieldNames supplied."); - checkNotNull(fieldTypes, "No fieldTypes supplied."); - checkNotNull(keyNames, "No keyNames supplied."); - this.connectionProvider = new PhoneixJdbcConnectionProvider( - options, options.getNamespaceMappingEnabled(), options.getMapSystemTablesToNamespace()); - this.keyNames = keyNames; - List nameList = Arrays.asList(fieldNames); - this.keyTypes = Arrays.stream(keyNames) - .map(s -> { - checkArgument(nameList.contains(s), "keyName %s can't find in fieldNames %s.", s, nameList); - return fieldTypes[nameList.indexOf(s)]; - }) - .toArray(DataType[]::new); - this.cacheMaxSize = lookupOptions.getCacheMaxSize(); - this.cacheExpireMs = lookupOptions.getCacheExpireMs(); - this.maxRetryTimes = lookupOptions.getMaxRetryTimes(); - this.query = options.getDialect().getSelectFromStatement(options.getTableName(), fieldNames, keyNames); - String dbURL = options.getDbURL(); - this.jdbcDialect = JdbcDialects.get(dbURL) - .orElseThrow(() -> new UnsupportedOperationException(String.format("Unknown dbUrl:%s", dbURL))); - this.jdbcRowConverter = jdbcDialect.getRowConverter(rowType); - this.lookupKeyRowConverter = jdbcDialect.getRowConverter( - RowType.of(Arrays.stream(keyTypes).map(DataType::getLogicalType).toArray(LogicalType[]::new))); - } - - @Override - public void open(FunctionContext context) throws Exception { - try { - establishConnectionAndStatement(); - this.cache = cacheMaxSize == -1 || cacheExpireMs == -1 - ? null - : CacheBuilder.newBuilder() - .expireAfterWrite(cacheExpireMs, TimeUnit.MILLISECONDS) - .maximumSize(cacheMaxSize) - .build(); - } catch (SQLException sqe) { - throw new IllegalArgumentException("open() failed.", sqe); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("JDBC driver class not found.", cnfe); - } - } - - /** - * This is a lookup method which is called by Flink framework in runtime. - * - * @param keys lookup keys - */ - public void eval(Object... 
keys) { - RowData keyRow = GenericRowData.of(keys); - if (cache != null) { - List cachedRows = cache.getIfPresent(keyRow); - if (cachedRows != null) { - for (RowData cachedRow : cachedRows) { - collect(cachedRow); - } - return; - } - } - - for (int retry = 0; retry <= maxRetryTimes; retry++) { - try { - statement.clearParameters(); - statement = lookupKeyRowConverter.toExternal(keyRow, statement); - try (ResultSet resultSet = statement.executeQuery()) { - if (cache == null) { - while (resultSet.next()) { - collect(jdbcRowConverter.toInternal(resultSet)); - } - } else { - ArrayList rows = new ArrayList<>(); - while (resultSet.next()) { - RowData row = jdbcRowConverter.toInternal(resultSet); - rows.add(row); - collect(row); - } - rows.trimToSize(); - cache.put(keyRow, rows); - } - } - break; - } catch (SQLException e) { - LOG.error(String.format("JDBC executeBatch error, retry times = %d", retry), e); - if (retry >= maxRetryTimes) { - throw new RuntimeException("Execution of JDBC statement failed.", e); - } - - try { - if (!connectionProvider.isConnectionValid()) { - statement.close(); - connectionProvider.closeConnection(); - establishConnectionAndStatement(); - } - } catch (SQLException | ClassNotFoundException excpetion) { - LOG.error("JDBC connection is not valid, and reestablish connection failed", excpetion); - throw new RuntimeException("Reestablish JDBC connection failed", excpetion); - } - - try { - Thread.sleep(1000 * retry); - } catch (InterruptedException e1) { - throw new RuntimeException(e1); - } - } - } - } - - private void establishConnectionAndStatement() throws SQLException, ClassNotFoundException { - Connection dbConn = connectionProvider.getOrEstablishConnection(); - statement = FieldNamedPreparedStatement.prepareStatement(dbConn, query, keyNames); - LOG.info("executor query SQL : " + query); - } - - @Override - public void close() throws IOException { - if (cache != null) { - cache.cleanUp(); - cache = null; - } - if (statement != null) { - try { - statement.close(); - } catch (SQLException e) { - LOG.info("JDBC statement could not be closed: " + e.getMessage()); - } finally { - statement = null; - } - } - - connectionProvider.closeConnection(); - } - - @VisibleForTesting - public Connection getDbConnection() { - return connectionProvider.getConnection(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSource.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSource.java deleted file mode 100644 index 9dc7bc1668..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSource.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
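The legacy PhoenixTableSource defined just below implements ProjectableTableSource: projectFields(int[]) re-creates the source with a selectFields index array, and its constructor narrows the produced row type to exactly those columns. A small worked example of that narrowing, using only TableSchema/DataTypes from the Flink table API (the field names are illustrative):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.types.DataType;

final class ProjectionSketch {
    public static void main(String[] args) {
        TableSchema schema = TableSchema.builder()
                .field("id", DataTypes.BIGINT())
                .field("name", DataTypes.STRING())
                .field("age", DataTypes.INT())
                .build();
        int[] selectFields = {2, 0}; // as handed to projectFields(...)

        DataType[] dataTypes = new DataType[selectFields.length];
        String[] fieldNames = new String[selectFields.length];
        for (int i = 0; i < selectFields.length; i++) {
            dataTypes[i] = schema.getFieldDataTypes()[selectFields[i]];
            fieldNames[i] = schema.getFieldNames()[selectFields[i]];
        }
        DataType produced = TableSchema.builder().fields(fieldNames, dataTypes).build().toRowDataType();
        System.out.println(produced); // a row type over (age INT, id BIGINT)
    }
}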
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.table; - -import static org.apache.flink.table.types.utils.TypeConversions.fromDataTypeToLegacyInfo; -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.connector.phoenix.PhoenixInputFormat; -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions; -import org.apache.flink.connector.phoenix.split.JdbcNumericBetweenParametersProvider; -import org.apache.flink.connector.phoenix.statement.FieldNamedPreparedStatementImpl; -import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil; -import org.apache.flink.streaming.api.datastream.DataStream; -import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.functions.AsyncTableFunction; -import org.apache.flink.table.functions.TableFunction; -import org.apache.flink.table.sources.LookupableTableSource; -import org.apache.flink.table.sources.ProjectableTableSource; -import org.apache.flink.table.sources.StreamTableSource; -import org.apache.flink.table.sources.TableSource; -import org.apache.flink.table.types.DataType; -import org.apache.flink.table.utils.TableConnectorUtils; -import org.apache.flink.types.Row; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.Objects; - -/** {@link TableSource} for JDBC. 
*/ -public class PhoenixTableSource - implements StreamTableSource, ProjectableTableSource, LookupableTableSource { - - private final JdbcOptions options; - private final JdbcReadOptions readOptions; - private final JdbcLookupOptions lookupOptions; - private final TableSchema schema; - - // index of fields selected, null means that all fields are selected - private final int[] selectFields; - private final DataType producedDataType; - - private PhoenixTableSource( - JdbcOptions options, JdbcReadOptions readOptions, JdbcLookupOptions lookupOptions, TableSchema schema) { - this(options, readOptions, lookupOptions, schema, null); - } - - private PhoenixTableSource( - JdbcOptions options, - JdbcReadOptions readOptions, - JdbcLookupOptions lookupOptions, - TableSchema schema, - int[] selectFields) { - this.options = options; - this.readOptions = readOptions; - this.lookupOptions = lookupOptions; - this.schema = schema; - - this.selectFields = selectFields; - - final DataType[] schemaDataTypes = schema.getFieldDataTypes(); - final String[] schemaFieldNames = schema.getFieldNames(); - if (selectFields != null) { - DataType[] dataTypes = new DataType[selectFields.length]; - String[] fieldNames = new String[selectFields.length]; - for (int i = 0; i < selectFields.length; i++) { - dataTypes[i] = schemaDataTypes[selectFields[i]]; - fieldNames[i] = schemaFieldNames[selectFields[i]]; - } - this.producedDataType = - TableSchema.builder().fields(fieldNames, dataTypes).build().toRowDataType(); - } else { - this.producedDataType = schema.toRowDataType(); - } - } - - @Override - public boolean isBounded() { - return true; - } - - @Override - public DataStream getDataStream(StreamExecutionEnvironment execEnv) { - return execEnv.createInput(getInputFormat(), (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType)) - .name(explainSource()); - } - - @Override - public TableFunction getLookupFunction(String[] lookupKeys) { - final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType); - return PhoenixLookupFunction.builder() - .setOptions(options) - .setLookupOptions(lookupOptions) - .setFieldTypes(rowTypeInfo.getFieldTypes()) - .setFieldNames(rowTypeInfo.getFieldNames()) - .setKeyNames(lookupKeys) - .build(); - } - - @Override - public DataType getProducedDataType() { - return producedDataType; - } - - @Override - public TableSource projectFields(int[] fields) { - return new PhoenixTableSource(options, readOptions, lookupOptions, schema, fields); - } - - @Override - public AsyncTableFunction getAsyncLookupFunction(String[] lookupKeys) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isAsyncEnabled() { - return false; - } - - @Override - public TableSchema getTableSchema() { - return schema; - } - - @Override - public String explainSource() { - final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType); - return TableConnectorUtils.generateRuntimeName(getClass(), rowTypeInfo.getFieldNames()); - } - - public static Builder builder() { - return new Builder(); - } - - private PhoenixInputFormat getInputFormat() { - final RowTypeInfo rowTypeInfo = (RowTypeInfo) fromDataTypeToLegacyInfo(producedDataType); - PhoenixInputFormat.PhoenixInputFormatBuilder builder = PhoenixInputFormat.buildJdbcInputFormat() - .setDrivername(options.getDriverName()) - .setDBUrl(options.getDbURL()) - .setRowTypeInfo(new RowTypeInfo(rowTypeInfo.getFieldTypes(), rowTypeInfo.getFieldNames())) - // 添加phoenix配置支持 - 
.setMapSystemTablesEnabled(options.isMapSystemTablesEnabled()) - .setNamespaceMappingEnabled(options.isNamespaceMappingEnabled()); - options.getUsername().ifPresent(builder::setUsername); - options.getPassword().ifPresent(builder::setPassword); - - if (readOptions.getFetchSize() != 0) { - builder.setFetchSize(readOptions.getFetchSize()); - } - - final JdbcDialect dialect = options.getDialect(); - String query = getBaseQueryStatement(rowTypeInfo); - if (readOptions.getPartitionColumnName().isPresent()) { - long lowerBound = readOptions.getPartitionLowerBound().get(); - long upperBound = readOptions.getPartitionUpperBound().get(); - int numPartitions = readOptions.getNumPartitions().get(); - builder.setParametersProvider( - new JdbcNumericBetweenParametersProvider(lowerBound, upperBound).ofBatchNum(numPartitions)); - query += " WHERE " - + dialect.quoteIdentifier( - readOptions.getPartitionColumnName().get()) - + " BETWEEN ? AND ?"; - } - builder.setQuery(query); - - return builder.finish(); - } - - private String getBaseQueryStatement(RowTypeInfo rowTypeInfo) { - return readOptions - .getQuery() - .orElseGet(() -> FieldNamedPreparedStatementImpl.parseNamedStatement( - options.getDialect() - .getSelectFromStatement( - options.getTableName(), rowTypeInfo.getFieldNames(), new String[0]), - new HashMap<>())); - } - - @Override - public boolean equals(Object o) { - if (o instanceof PhoenixTableSource) { - PhoenixTableSource source = (PhoenixTableSource) o; - return Objects.equals(options, source.options) - && Objects.equals(readOptions, source.readOptions) - && Objects.equals(lookupOptions, source.lookupOptions) - && Objects.equals(schema, source.schema) - && Arrays.equals(selectFields, source.selectFields); - } else { - return false; - } - } - - /** Builder for a {@link PhoenixTableSource}. */ - public static class Builder { - - private JdbcOptions options; - private JdbcReadOptions readOptions; - private JdbcLookupOptions lookupOptions; - protected TableSchema schema; - - /** required, jdbc options. */ - public Builder setOptions(JdbcOptions options) { - this.options = options; - return this; - } - - /** - * optional, scan related options. {@link JdbcReadOptions} will be only used for {@link - * StreamTableSource}. - */ - public Builder setReadOptions(JdbcReadOptions readOptions) { - this.readOptions = readOptions; - return this; - } - - /** - * optional, lookup related options. {@link JdbcLookupOptions} only be used for {@link - * LookupableTableSource}. - */ - public Builder setLookupOptions(JdbcLookupOptions lookupOptions) { - this.lookupOptions = lookupOptions; - return this; - } - - /** required, table schema of this table source. */ - public Builder setSchema(TableSchema schema) { - this.schema = JdbcTypeUtil.normalizeTableSchema(schema); - return this; - } - - /** - * Finalizes the configuration and checks validity. 
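The partitioned-read branch in getInputFormat() above appends `WHERE <column> BETWEEN ? AND ?` to the base query and hands a JdbcNumericBetweenParametersProvider the configured lower bound, upper bound and partition number. The splitting idea, reduced to a sketch (an illustration of how such a provider typically divides the range, not the exact implementation):

```java
import java.util.ArrayList;
import java.util.List;

/** Splits [lowerBound, upperBound] into batchNum inclusive ranges used as BETWEEN parameters. */
public final class RangeSplitter {

    /** Each element is a {lower, upper} pair bound to "WHERE col BETWEEN ? AND ?"; assumes batchNum >= 1. */
    public static List<long[]> split(long lowerBound, long upperBound, int batchNum) {
        long span = upperBound - lowerBound + 1;
        long batchSize = (span + batchNum - 1) / batchNum;     // ceiling division
        List<long[]> parameters = new ArrayList<>();
        for (long start = lowerBound; start <= upperBound; start += batchSize) {
            long end = Math.min(start + batchSize - 1, upperBound);
            parameters.add(new long[] {start, end});
        }
        return parameters;
    }

    public static void main(String[] args) {
        // e.g. lower-bound=1, upper-bound=100, partition.num=4 -> [1,25] [26,50] [51,75] [76,100]
        for (long[] range : split(1, 100, 4)) {
            System.out.printf("BETWEEN %d AND %d%n", range[0], range[1]);
        }
    }
}
```

Each pair then becomes the bind parameters of one input split, so the partitions can be read in parallel.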
- * - * @return Configured JdbcTableSource - */ - public PhoenixTableSource build() { - checkNotNull(options, "No options supplied."); - checkNotNull(schema, "No schema supplied."); - if (readOptions == null) { - readOptions = JdbcReadOptions.builder().build(); - } - if (lookupOptions == null) { - lookupOptions = JdbcLookupOptions.builder().build(); - } - return new PhoenixTableSource(options, readOptions, lookupOptions, schema); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSourceSinkFactory.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSourceSinkFactory.java deleted file mode 100644 index 39a2df1e69..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixTableSourceSinkFactory.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.table; - -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_DRIVER; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_LOOKUP_CACHE_MAX_ROWS; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_LOOKUP_CACHE_TTL; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_LOOKUP_MAX_RETRIES; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_PASSWORD; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_READ_FETCH_SIZE; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_READ_PARTITION_COLUMN; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_READ_PARTITION_LOWER_BOUND; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_READ_PARTITION_NUM; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_READ_PARTITION_UPPER_BOUND; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_READ_QUERY; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_TABLE; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_TYPE_VALUE_JDBC; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_URL; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_USERNAME; -import static 
org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_WRITE_FLUSH_INTERVAL; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_WRITE_FLUSH_MAX_ROWS; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.CONNECTOR_WRITE_MAX_RETRIES; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE; -import static org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator.PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE; -import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_PROPERTY_VERSION; -import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_TYPE; -import static org.apache.flink.table.descriptors.DescriptorProperties.COMMENT; -import static org.apache.flink.table.descriptors.DescriptorProperties.EXPR; -import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK; -import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_ROWTIME; -import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_STRATEGY_DATA_TYPE; -import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_STRATEGY_EXPR; -import static org.apache.flink.table.descriptors.Schema.SCHEMA; -import static org.apache.flink.table.descriptors.Schema.SCHEMA_DATA_TYPE; -import static org.apache.flink.table.descriptors.Schema.SCHEMA_NAME; -import static org.apache.flink.table.descriptors.Schema.SCHEMA_TYPE; - -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.connector.phoenix.dialect.JdbcDialects; -import org.apache.flink.connector.phoenix.internal.options.JdbcLookupOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcOptions; -import org.apache.flink.connector.phoenix.internal.options.JdbcReadOptions; -import org.apache.flink.connector.phoenix.utils.PhoenixJdbcValidator; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.descriptors.DescriptorProperties; -import org.apache.flink.table.descriptors.SchemaValidator; -import org.apache.flink.table.factories.StreamTableSinkFactory; -import org.apache.flink.table.factories.StreamTableSourceFactory; -import org.apache.flink.table.sinks.StreamTableSink; -import org.apache.flink.table.sources.StreamTableSource; -import org.apache.flink.table.utils.TableSchemaUtils; -import org.apache.flink.types.Row; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -public class PhoenixTableSourceSinkFactory - implements StreamTableSourceFactory, StreamTableSinkFactory> { - - @Override - public Map requiredContext() { - Map context = new HashMap<>(); - // context.put(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_JDBC); // jdbc - context.put(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_JDBC); // phoenix - context.put(CONNECTOR_PROPERTY_VERSION, "1"); // backwards compatibility - - return context; - } - - @Override - public List supportedProperties() { - List properties = new ArrayList<>(); - - // phoenix - properties.add(PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE); - properties.add(PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE); - - // common options - properties.add(CONNECTOR_DRIVER); - properties.add(CONNECTOR_URL); - properties.add(CONNECTOR_TABLE); - properties.add(CONNECTOR_USERNAME); - properties.add(CONNECTOR_PASSWORD); - properties.add(CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT); - - // scan options - 
properties.add(CONNECTOR_READ_QUERY); - properties.add(CONNECTOR_READ_PARTITION_COLUMN); - properties.add(CONNECTOR_READ_PARTITION_NUM); - properties.add(CONNECTOR_READ_PARTITION_LOWER_BOUND); - properties.add(CONNECTOR_READ_PARTITION_UPPER_BOUND); - properties.add(CONNECTOR_READ_FETCH_SIZE); - - // lookup options - properties.add(CONNECTOR_LOOKUP_CACHE_MAX_ROWS); - properties.add(CONNECTOR_LOOKUP_CACHE_TTL); - properties.add(CONNECTOR_LOOKUP_MAX_RETRIES); - - // sink options - properties.add(CONNECTOR_WRITE_FLUSH_MAX_ROWS); - properties.add(CONNECTOR_WRITE_FLUSH_INTERVAL); - properties.add(CONNECTOR_WRITE_MAX_RETRIES); - - // schema - properties.add(SCHEMA + ".#." + SCHEMA_DATA_TYPE); - properties.add(SCHEMA + ".#." + SCHEMA_TYPE); - properties.add(SCHEMA + ".#." + SCHEMA_NAME); - // computed column - properties.add(SCHEMA + ".#." + EXPR); - - // watermark - properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_ROWTIME); - properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_STRATEGY_EXPR); - properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_STRATEGY_DATA_TYPE); - - // table constraint - properties.add(SCHEMA + "." + DescriptorProperties.PRIMARY_KEY_NAME); - properties.add(SCHEMA + "." + DescriptorProperties.PRIMARY_KEY_COLUMNS); - - // comment - properties.add(COMMENT); - - return properties; - } - - @Override - public StreamTableSource createStreamTableSource(Map properties) { - DescriptorProperties descriptorProperties = getValidatedProperties(properties); - TableSchema schema = TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA)); - - return PhoenixTableSource.builder() - .setOptions(getJdbcOptions(descriptorProperties)) - .setReadOptions(getJdbcReadOptions(descriptorProperties)) - .setLookupOptions(getJdbcLookupOptions(descriptorProperties)) - .setSchema(schema) - .build(); - } - - @Override - public StreamTableSink> createStreamTableSink(Map properties) { - DescriptorProperties descriptorProperties = getValidatedProperties(properties); - TableSchema schema = TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA)); - - final PhoenixUpsertTableSink.Builder builder = PhoenixUpsertTableSink.builder() - .setOptions(getJdbcOptions(descriptorProperties)) - .setTableSchema(schema); - - descriptorProperties.getOptionalInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS).ifPresent(builder::setFlushMaxSize); - descriptorProperties - .getOptionalDuration(CONNECTOR_WRITE_FLUSH_INTERVAL) - .ifPresent(s -> builder.setFlushIntervalMills(s.toMillis())); - descriptorProperties.getOptionalInt(CONNECTOR_WRITE_MAX_RETRIES).ifPresent(builder::setMaxRetryTimes); - - return builder.build(); - } - - private DescriptorProperties getValidatedProperties(Map properties) { - final DescriptorProperties descriptorProperties = new DescriptorProperties(true); - descriptorProperties.putProperties(properties); - - new SchemaValidator(true, false, false).validate(descriptorProperties); - new PhoenixJdbcValidator().validate(descriptorProperties); - - return descriptorProperties; - } - - private JdbcOptions getJdbcOptions(DescriptorProperties descriptorProperties) { - final String url = descriptorProperties.getString(CONNECTOR_URL); - final JdbcOptions.Builder builder = JdbcOptions.builder() - .setDBUrl(url) - .setTableName(descriptorProperties.getString(CONNECTOR_TABLE)) - .setDialect(JdbcDialects.get(url).get()) - .setNamespaceMappingEnabled( - Boolean.parseBoolean(descriptorProperties.getString(PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE))) - 
.setMapSystemTablesEnabled( - Boolean.parseBoolean(descriptorProperties.getString(PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE))); - - descriptorProperties - .getOptionalDuration(CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT) - .ifPresent(s -> builder.setConnectionCheckTimeoutSeconds((int) s.getSeconds())); - descriptorProperties.getOptionalString(CONNECTOR_DRIVER).ifPresent(builder::setDriverName); - descriptorProperties.getOptionalString(CONNECTOR_USERNAME).ifPresent(builder::setUsername); - descriptorProperties.getOptionalString(CONNECTOR_PASSWORD).ifPresent(builder::setPassword); - - return builder.build(); - } - - private JdbcReadOptions getJdbcReadOptions(DescriptorProperties descriptorProperties) { - final Optional query = descriptorProperties.getOptionalString(CONNECTOR_READ_QUERY); - final Optional partitionColumnName = - descriptorProperties.getOptionalString(CONNECTOR_READ_PARTITION_COLUMN); - final Optional partitionLower = - descriptorProperties.getOptionalLong(CONNECTOR_READ_PARTITION_LOWER_BOUND); - final Optional partitionUpper = - descriptorProperties.getOptionalLong(CONNECTOR_READ_PARTITION_UPPER_BOUND); - final Optional numPartitions = descriptorProperties.getOptionalInt(CONNECTOR_READ_PARTITION_NUM); - - final JdbcReadOptions.Builder builder = JdbcReadOptions.builder(); - if (query.isPresent()) { - builder.setQuery(query.get()); - } - if (partitionColumnName.isPresent()) { - builder.setPartitionColumnName(partitionColumnName.get()); - builder.setPartitionLowerBound(partitionLower.get()); - builder.setPartitionUpperBound(partitionUpper.get()); - builder.setNumPartitions(numPartitions.get()); - } - descriptorProperties.getOptionalInt(CONNECTOR_READ_FETCH_SIZE).ifPresent(builder::setFetchSize); - - return builder.build(); - } - - private JdbcLookupOptions getJdbcLookupOptions(DescriptorProperties descriptorProperties) { - final JdbcLookupOptions.Builder builder = JdbcLookupOptions.builder(); - - descriptorProperties.getOptionalLong(CONNECTOR_LOOKUP_CACHE_MAX_ROWS).ifPresent(builder::setCacheMaxSize); - descriptorProperties - .getOptionalDuration(CONNECTOR_LOOKUP_CACHE_TTL) - .ifPresent(s -> builder.setCacheExpireMs(s.toMillis())); - descriptorProperties.getOptionalInt(CONNECTOR_LOOKUP_MAX_RETRIES).ifPresent(builder::setMaxRetryTimes); - - return builder.build(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixUpsertTableSink.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixUpsertTableSink.java deleted file mode 100644 index 8fef142b8a..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/table/PhoenixUpsertTableSink.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
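Putting the factory together: a table registered with the legacy `connector.*` / `phoenix.schema.*` keys listed above is validated by PhoenixJdbcValidator and materialized by this factory. A hypothetical registration through the Table API, assuming the legacy property-based DDL path still resolves this factory in the targeted Flink 1.14 build; the URL, table name and cache values are placeholders:

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/** Hypothetical registration of a Phoenix table through the legacy connector.* properties. */
public class PhoenixLegacyDdlExample {

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Property keys mirror PhoenixJdbcValidator / supportedProperties(); values are examples only.
        tableEnv.executeSql(
                "CREATE TABLE dim_phoenix ("
                        + "  id BIGINT,"
                        + "  name STRING"
                        + ") WITH ("
                        + "  'connector.type' = 'phoenix',"
                        + "  'connector.url' = 'jdbc:phoenix:zk-host:2181',"
                        + "  'connector.table' = 'DIM_TABLE',"
                        + "  'connector.driver' = 'org.apache.phoenix.jdbc.PhoenixDriver',"
                        + "  'phoenix.schema.isnamespacemappingenabled' = 'true',"
                        + "  'phoenix.schema.mapsystemtablestonamespace' = 'true',"
                        + "  'connector.lookup.cache.max-rows' = '1000',"
                        + "  'connector.lookup.cache.ttl' = '60s'"
                        + ")");

        tableEnv.executeSql("SELECT * FROM dim_phoenix").print();
    }
}
```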
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.table; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.common.typeinfo.Types; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.api.java.typeutils.TupleTypeInfo; -import org.apache.flink.connector.phoenix.JdbcExecutionOptions; -import org.apache.flink.connector.phoenix.internal.AbstractJdbcOutputFormat; -import org.apache.flink.connector.phoenix.internal.GenericJdbcSinkFunction; -import org.apache.flink.connector.phoenix.internal.JdbcBatchingOutputFormat; -import org.apache.flink.connector.phoenix.internal.executor.JdbcBatchStatementExecutor; -import org.apache.flink.connector.phoenix.internal.options.JdbcOptions; -import org.apache.flink.connector.phoenix.utils.JdbcTypeUtil; -import org.apache.flink.streaming.api.datastream.DataStream; -import org.apache.flink.streaming.api.datastream.DataStreamSink; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.api.ValidationException; -import org.apache.flink.table.sinks.TableSink; -import org.apache.flink.table.sinks.UpsertStreamTableSink; -import org.apache.flink.table.utils.TableConnectorUtils; -import org.apache.flink.table.utils.TableSchemaUtils; -import org.apache.flink.types.Row; - -import java.util.Arrays; -import java.util.Objects; - -/** An upsert {@link UpsertStreamTableSink} for JDBC. */ -public class PhoenixUpsertTableSink implements UpsertStreamTableSink { - - private final TableSchema schema; - private final JdbcOptions options; - private final int flushMaxSize; - private final long flushIntervalMills; - private final int maxRetryTime; - - private String[] keyFields; - private boolean isAppendOnly; - - private PhoenixUpsertTableSink( - TableSchema schema, JdbcOptions options, int flushMaxSize, long flushIntervalMills, int maxRetryTime) { - this.schema = TableSchemaUtils.checkOnlyPhysicalColumns(schema); - this.options = options; - this.flushMaxSize = flushMaxSize; - this.flushIntervalMills = flushIntervalMills; - this.maxRetryTime = maxRetryTime; - } - - private JdbcBatchingOutputFormat, Row, JdbcBatchStatementExecutor> newFormat() { - if (!isAppendOnly && (keyFields == null || keyFields.length == 0)) { - throw new UnsupportedOperationException("JdbcUpsertTableSink can not support "); - } - - // sql types - int[] jdbcSqlTypes = Arrays.stream(schema.getFieldTypes()) - .mapToInt(JdbcTypeUtil::typeInformationToSqlType) - .toArray(); - - return JdbcBatchingOutputFormat.builder() - .setOptions(options) - .setFieldNames(schema.getFieldNames()) - .setFlushMaxSize(flushMaxSize) - .setFlushIntervalMills(flushIntervalMills) - .setMaxRetryTimes(maxRetryTime) - .setFieldTypes(jdbcSqlTypes) - .setKeyFields(keyFields) - .build(); - } - - @Override - public DataStreamSink consumeDataStream(DataStream> dataStream) { - - return dataStream - .addSink(new GenericJdbcSinkFunction<>(newFormat())) - // .addSink(new PhoenixSinkFunction( - // options, - // new - // PhoneixJdbcConnectionProvider(options,options.isNamespaceMappingEnabled(), - // options.isMapSystemTablesEnabled()), - // getFieldNames(), - // keyFields, - // jdbcSqlTypes - // )) - .setParallelism(dataStream.getParallelism()) - .name(TableConnectorUtils.generateRuntimeName(this.getClass(), schema.getFieldNames())); - } 
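consumeDataStream() above wraps a JdbcBatchingOutputFormat whose behaviour is governed by flushMaxSize, flushIntervalMills and maxRetryTimes: records are buffered and written either when the buffer is full or when the flush timer fires. A toy sketch of that buffering contract (not the connector's implementation; the real format also handles retries and distinguishes upsert from delete records):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/** Toy buffer that flushes on a size threshold or on a timer, like a batching JDBC output format. */
public class BatchingBufferSketch<T> implements AutoCloseable {

    private final List<T> buffer = new ArrayList<>();
    private final int flushMaxSize;
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    public BatchingBufferSketch(int flushMaxSize, long flushIntervalMills) {
        this.flushMaxSize = flushMaxSize;
        // Asynchronous flush so slowly arriving records still reach the database.
        scheduler.scheduleWithFixedDelay(this::flush, flushIntervalMills, flushIntervalMills, TimeUnit.MILLISECONDS);
    }

    public synchronized void addToBatch(T record) {
        buffer.add(record);
        if (buffer.size() >= flushMaxSize) {
            flush();                            // size threshold reached: flush immediately
        }
    }

    public synchronized void flush() {
        if (buffer.isEmpty()) {
            return;
        }
        // A real implementation would run statement.executeBatch() with retries here.
        System.out.println("flushing " + buffer.size() + " records");
        buffer.clear();
    }

    @Override
    public synchronized void close() {
        flush();                                // flush remaining records on close / checkpoint
        scheduler.shutdown();
    }
}
```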
- - @Override - public void setKeyFields(String[] keys) { - this.keyFields = keys; - } - - @Override - public void setIsAppendOnly(Boolean isAppendOnly) { - this.isAppendOnly = isAppendOnly; - } - - @Override - public TypeInformation> getOutputType() { - return new TupleTypeInfo<>(Types.BOOLEAN, getRecordType()); - } - - @Override - public TypeInformation getRecordType() { - return new RowTypeInfo(schema.getFieldTypes(), schema.getFieldNames()); - } - - @Override - public String[] getFieldNames() { - return schema.getFieldNames(); - } - - @Override - public TypeInformation[] getFieldTypes() { - return schema.getFieldTypes(); - } - - @Override - public TableSink> configure(String[] fieldNames, TypeInformation[] fieldTypes) { - if (!Arrays.equals(getFieldNames(), fieldNames) || !Arrays.equals(getFieldTypes(), fieldTypes)) { - throw new ValidationException("Reconfiguration with different fields is not allowed. " - + "Expected: " - + Arrays.toString(getFieldNames()) - + " / " - + Arrays.toString(getFieldTypes()) - + ". " - + "But was: " - + Arrays.toString(fieldNames) - + " / " - + Arrays.toString(fieldTypes)); - } - - PhoenixUpsertTableSink copy = - new PhoenixUpsertTableSink(schema, options, flushMaxSize, flushIntervalMills, maxRetryTime); - copy.keyFields = keyFields; - return copy; - } - - public static Builder builder() { - return new Builder(); - } - - @Override - public boolean equals(Object o) { - if (o instanceof PhoenixUpsertTableSink) { - PhoenixUpsertTableSink sink = (PhoenixUpsertTableSink) o; - return Objects.equals(schema, sink.schema) - && Objects.equals(options, sink.options) - && Objects.equals(flushMaxSize, sink.flushMaxSize) - && Objects.equals(flushIntervalMills, sink.flushIntervalMills) - && Objects.equals(maxRetryTime, sink.maxRetryTime) - && Arrays.equals(keyFields, sink.keyFields) - && Objects.equals(isAppendOnly, sink.isAppendOnly); - } else { - return false; - } - } - - /** Builder for a {@link PhoenixUpsertTableSink}. */ - public static class Builder { - protected TableSchema schema; - private JdbcOptions options; - protected int flushMaxSize = AbstractJdbcOutputFormat.DEFAULT_FLUSH_MAX_SIZE; - protected long flushIntervalMills = AbstractJdbcOutputFormat.DEFAULT_FLUSH_INTERVAL_MILLS; - protected int maxRetryTimes = JdbcExecutionOptions.DEFAULT_MAX_RETRY_TIMES; - - /** required, table schema of this table source. */ - public Builder setTableSchema(TableSchema schema) { - this.schema = JdbcTypeUtil.normalizeTableSchema(schema); - return this; - } - - /** required, jdbc options. */ - public Builder setOptions(JdbcOptions options) { - this.options = options; - return this; - } - - /** - * optional, flush max size (includes all append, upsert and delete records), over this - * number of records, will flush data. - */ - public Builder setFlushMaxSize(int flushMaxSize) { - this.flushMaxSize = flushMaxSize; - return this; - } - - /** optional, flush interval mills, over this time, asynchronous threads will flush data. */ - public Builder setFlushIntervalMills(long flushIntervalMills) { - this.flushIntervalMills = flushIntervalMills; - return this; - } - - /** optional, max retry times for jdbc connector. 
*/ - public Builder setMaxRetryTimes(int maxRetryTimes) { - this.maxRetryTimes = maxRetryTimes; - return this; - } - - public PhoenixUpsertTableSink build() { - checkNotNull(schema, "No schema supplied."); - checkNotNull(options, "No options supplied."); - return new PhoenixUpsertTableSink(schema, options, flushMaxSize, flushIntervalMills, maxRetryTimes); - } - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcTypeUtil.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcTypeUtil.java deleted file mode 100644 index 9ce2dad135..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcTypeUtil.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.utils; - -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.BIG_DEC_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.BOOLEAN_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.BYTE_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.DOUBLE_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.FLOAT_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.INT_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.LONG_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.SHORT_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.BasicTypeInfo.STRING_TYPE_INFO; -import static org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.api.common.typeinfo.LocalTimeTypeInfo; -import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo; -import org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.types.DataType; -import org.apache.flink.table.types.inference.TypeTransformations; -import org.apache.flink.table.types.utils.DataTypeUtils; - -import java.sql.Types; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** Utils for jdbc type. 
*/ -@Internal -public class JdbcTypeUtil { - private static final Map, Integer> TYPE_MAPPING; - private static final Map SQL_TYPE_NAMES; - - static { - HashMap, Integer> m = new HashMap<>(); - m.put(STRING_TYPE_INFO, Types.VARCHAR); - m.put(BOOLEAN_TYPE_INFO, Types.BOOLEAN); - m.put(BYTE_TYPE_INFO, Types.TINYINT); - m.put(SHORT_TYPE_INFO, Types.SMALLINT); - m.put(INT_TYPE_INFO, Types.INTEGER); - m.put(LONG_TYPE_INFO, Types.BIGINT); - m.put(FLOAT_TYPE_INFO, Types.REAL); - m.put(DOUBLE_TYPE_INFO, Types.DOUBLE); - m.put(SqlTimeTypeInfo.DATE, Types.DATE); - m.put(SqlTimeTypeInfo.TIME, Types.TIME); - m.put(SqlTimeTypeInfo.TIMESTAMP, Types.TIMESTAMP); - m.put(LocalTimeTypeInfo.LOCAL_DATE, Types.DATE); - m.put(LocalTimeTypeInfo.LOCAL_TIME, Types.TIME); - m.put(LocalTimeTypeInfo.LOCAL_DATE_TIME, Types.TIMESTAMP); - m.put(BIG_DEC_TYPE_INFO, Types.DECIMAL); - m.put(BYTE_PRIMITIVE_ARRAY_TYPE_INFO, Types.BINARY); - TYPE_MAPPING = Collections.unmodifiableMap(m); - - HashMap names = new HashMap<>(); - names.put(Types.VARCHAR, "VARCHAR"); - names.put(Types.BOOLEAN, "BOOLEAN"); - names.put(Types.TINYINT, "TINYINT"); - names.put(Types.SMALLINT, "SMALLINT"); - names.put(Types.INTEGER, "INTEGER"); - names.put(Types.BIGINT, "BIGINT"); - names.put(Types.FLOAT, "FLOAT"); - names.put(Types.DOUBLE, "DOUBLE"); - names.put(Types.CHAR, "CHAR"); - names.put(Types.DATE, "DATE"); - names.put(Types.TIME, "TIME"); - names.put(Types.TIMESTAMP, "TIMESTAMP"); - names.put(Types.DECIMAL, "DECIMAL"); - names.put(Types.BINARY, "BINARY"); - SQL_TYPE_NAMES = Collections.unmodifiableMap(names); - } - - private JdbcTypeUtil() {} - - public static int typeInformationToSqlType(TypeInformation type) { - - if (TYPE_MAPPING.containsKey(type)) { - return TYPE_MAPPING.get(type); - } else if (type instanceof ObjectArrayTypeInfo || type instanceof PrimitiveArrayTypeInfo) { - return Types.ARRAY; - } else { - throw new IllegalArgumentException("Unsupported type: " + type); - } - } - - public static String getTypeName(int type) { - return SQL_TYPE_NAMES.get(type); - } - - public static String getTypeName(TypeInformation type) { - return SQL_TYPE_NAMES.get(typeInformationToSqlType(type)); - } - - /** - * The original table schema may contain generated columns which shouldn't be produced/consumed - * by TableSource/TableSink. And the original TIMESTAMP/DATE/TIME types uses - * LocalDateTime/LocalDate/LocalTime as the conversion classes, however, JDBC connector uses - * Timestamp/Date/Time classes. So that we bridge them to the expected conversion classes. - */ - public static TableSchema normalizeTableSchema(TableSchema schema) { - TableSchema.Builder physicalSchemaBuilder = TableSchema.builder(); - schema.getTableColumns().forEach(c -> { - if (c.isPhysical()) { - final DataType type = DataTypeUtils.transform(c.getType(), TypeTransformations.timeToSqlTypes()); - physicalSchemaBuilder.field(c.getName(), type); - } - }); - return physicalSchemaBuilder.build(); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcUtils.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcUtils.java deleted file mode 100644 index f887547172..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/JdbcUtils.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
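JdbcTypeUtil above is the bridge between Flink's TypeInformation and java.sql.Types, and it is what PhoenixUpsertTableSink uses to build its jdbcSqlTypes array. A small usage sketch that references the (now removed) class purely for illustration; the field list is made up:

```java
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;

import java.sql.Types;

public class JdbcTypeUtilExample {

    public static void main(String[] args) {
        TypeInformation<?>[] fieldTypes = {
            BasicTypeInfo.STRING_TYPE_INFO,       // -> Types.VARCHAR
            BasicTypeInfo.LONG_TYPE_INFO,         // -> Types.BIGINT
            SqlTimeTypeInfo.TIMESTAMP             // -> Types.TIMESTAMP
        };

        int[] jdbcSqlTypes = new int[fieldTypes.length];
        for (int i = 0; i < fieldTypes.length; i++) {
            jdbcSqlTypes[i] = JdbcTypeUtil.typeInformationToSqlType(fieldTypes[i]);
        }

        // Prints VARCHAR, BIGINT, TIMESTAMP
        for (int sqlType : jdbcSqlTypes) {
            System.out.println(JdbcTypeUtil.getTypeName(sqlType));
        }
        assert jdbcSqlTypes[0] == Types.VARCHAR;
    }
}
```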
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.utils; - -import org.apache.flink.types.Row; - -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Utils for jdbc connectors. */ -public class JdbcUtils { - - private static final Logger LOG = LoggerFactory.getLogger(JdbcUtils.class); - - /** - * Adds a record to the prepared statement. - * - *

When this method is called, the output format is guaranteed to be opened. - * - *

WARNING: this may fail when no column types specified (because a best effort approach is - * attempted in order to insert a null value but it's not guaranteed that the JDBC driver - * handles PreparedStatement.setObject(pos, null)) - * - * @param upload The prepared statement. - * @param typesArray The jdbc types of the row. - * @param row The records to add to the output. - * @see PreparedStatement - */ - public static void setRecordToStatement(PreparedStatement upload, int[] typesArray, Row row) throws SQLException { - if (typesArray != null && typesArray.length > 0 && typesArray.length != row.getArity()) { - LOG.warn("Column SQL types array doesn't match arity of passed Row! Check the passed" + " array..."); - } - if (typesArray == null) { - // no types provided - for (int index = 0; index < row.getArity(); index++) { - LOG.warn( - "Unknown column type for column {}. Best effort approach to set its value:" + " {}.", - index + 1, - row.getField(index)); - upload.setObject(index + 1, row.getField(index)); - } - } else { - // types provided - for (int i = 0; i < row.getArity(); i++) { - setField(upload, typesArray[i], row.getField(i), i); - } - } - } - - public static void setField(PreparedStatement upload, int type, Object field, int index) throws SQLException { - if (field == null) { - upload.setNull(index + 1, type); - } else { - try { - // casting values as suggested by - // http://docs.oracle.com/javase/1.5.0/docs/guide/jdbc/getstart/mapping.html - switch (type) { - case java.sql.Types.NULL: - upload.setNull(index + 1, type); - break; - case java.sql.Types.BOOLEAN: - case java.sql.Types.BIT: - upload.setBoolean(index + 1, (boolean) field); - break; - case java.sql.Types.CHAR: - case java.sql.Types.NCHAR: - case java.sql.Types.VARCHAR: - case java.sql.Types.LONGVARCHAR: - case java.sql.Types.LONGNVARCHAR: - upload.setString(index + 1, (String) field); - break; - case java.sql.Types.TINYINT: - upload.setByte(index + 1, (byte) field); - break; - case java.sql.Types.SMALLINT: - upload.setShort(index + 1, (short) field); - break; - case java.sql.Types.INTEGER: - upload.setInt(index + 1, (int) field); - break; - case java.sql.Types.BIGINT: - upload.setLong(index + 1, (long) field); - break; - case java.sql.Types.REAL: - upload.setFloat(index + 1, (float) field); - break; - case java.sql.Types.FLOAT: - case java.sql.Types.DOUBLE: - upload.setDouble(index + 1, (double) field); - break; - case java.sql.Types.DECIMAL: - case java.sql.Types.NUMERIC: - upload.setBigDecimal(index + 1, (java.math.BigDecimal) field); - break; - case java.sql.Types.DATE: - upload.setDate(index + 1, (java.sql.Date) field); - break; - case java.sql.Types.TIME: - upload.setTime(index + 1, (java.sql.Time) field); - break; - case java.sql.Types.TIMESTAMP: - upload.setTimestamp(index + 1, (java.sql.Timestamp) field); - break; - case java.sql.Types.BINARY: - case java.sql.Types.VARBINARY: - case java.sql.Types.LONGVARBINARY: - upload.setBytes(index + 1, (byte[]) field); - break; - default: - upload.setObject(index + 1, field); - LOG.warn( - "Unmanaged sql type ({}) for column {}. 
Best effort approach to" - + " set its value: {}.", - type, - index + 1, - field); - // case java.sql.Types.SQLXML - // case java.sql.Types.ARRAY: - // case java.sql.Types.JAVA_OBJECT: - // case java.sql.Types.BLOB: - // case java.sql.Types.CLOB: - // case java.sql.Types.NCLOB: - // case java.sql.Types.DATALINK: - // case java.sql.Types.DISTINCT: - // case java.sql.Types.OTHER: - // case java.sql.Types.REF: - // case java.sql.Types.ROWID: - // case java.sql.Types.STRUC - } - } catch (ClassCastException e) { - // enrich the exception with detailed information. - String errorMessage = - String.format("%s, field index: %s, field value: %s.", e.getMessage(), index, field); - ClassCastException enrichedException = new ClassCastException(errorMessage); - enrichedException.setStackTrace(e.getStackTrace()); - throw enrichedException; - } - } - } - - public static Object getFieldFromResultSet(int index, int type, ResultSet set) throws SQLException { - Object ret; - switch (type) { - case java.sql.Types.NULL: - ret = null; - break; - case java.sql.Types.BOOLEAN: - case java.sql.Types.BIT: - ret = set.getBoolean(index + 1); - break; - case java.sql.Types.CHAR: - case java.sql.Types.NCHAR: - case java.sql.Types.VARCHAR: - case java.sql.Types.LONGVARCHAR: - case java.sql.Types.LONGNVARCHAR: - ret = set.getString(index + 1); - break; - case java.sql.Types.TINYINT: - ret = set.getByte(index + 1); - break; - case java.sql.Types.SMALLINT: - ret = set.getShort(index + 1); - break; - case java.sql.Types.INTEGER: - ret = set.getInt(index + 1); - break; - case java.sql.Types.BIGINT: - ret = set.getLong(index + 1); - break; - case java.sql.Types.REAL: - ret = set.getFloat(index + 1); - break; - case java.sql.Types.FLOAT: - case java.sql.Types.DOUBLE: - ret = set.getDouble(index + 1); - break; - case java.sql.Types.DECIMAL: - case java.sql.Types.NUMERIC: - ret = set.getBigDecimal(index + 1); - break; - case java.sql.Types.DATE: - ret = set.getDate(index + 1); - break; - case java.sql.Types.TIME: - ret = set.getTime(index + 1); - break; - case java.sql.Types.TIMESTAMP: - ret = set.getTimestamp(index + 1); - break; - case java.sql.Types.BINARY: - case java.sql.Types.VARBINARY: - case java.sql.Types.LONGVARBINARY: - ret = set.getBytes(index + 1); - break; - default: - ret = set.getObject(index + 1); - LOG.warn( - "Unmanaged sql type ({}) for column {}. 
Best effort approach to get its" + " value: {}.", - type, - index + 1, - ret); - break; - - // case java.sql.Types.SQLXML - // case java.sql.Types.ARRAY: - // case java.sql.Types.JAVA_OBJECT: - // case java.sql.Types.BLOB: - // case java.sql.Types.CLOB: - // case java.sql.Types.NCLOB: - // case java.sql.Types.DATALINK: - // case java.sql.Types.DISTINCT: - // case java.sql.Types.OTHER: - // case java.sql.Types.REF: - // case java.sql.Types.ROWID: - // case java.sql.Types.STRUC - } - - if (set.wasNull()) { - return null; - } else { - return ret; - } - } - - public static Row getPrimaryKey(Row row, int[] pkFields) { - Row pkRow = new Row(pkFields.length); - for (int i = 0; i < pkFields.length; i++) { - pkRow.setField(i, row.getField(pkFields[i])); - } - return pkRow; - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/PhoenixJdbcValidator.java b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/PhoenixJdbcValidator.java deleted file mode 100644 index 2a53f0a3fc..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/java/org/apache/flink/connector/phoenix/utils/PhoenixJdbcValidator.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.flink.connector.phoenix.utils; - -import static org.apache.flink.table.descriptors.Schema.SCHEMA; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.connector.phoenix.dialect.JdbcDialect; -import org.apache.flink.connector.phoenix.dialect.JdbcDialects; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.descriptors.ConnectorDescriptorValidator; -import org.apache.flink.table.descriptors.DescriptorProperties; -import org.apache.flink.table.utils.TableSchemaUtils; -import org.apache.flink.util.Preconditions; - -import java.util.Optional; - -/** The validator for JDBC. 
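As the setRecordToStatement() javadoc above warns, binding a null field without a SQL type forces a best-effort setObject(pos, null), which not every driver accepts; with a types array, setField() can call setNull(pos, type) instead. A short sketch of preparing that array and binding one row (the UPSERT statement and table name are placeholders, and the Connection is assumed to be open already):

```java
import org.apache.flink.types.Row;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Types;

public class JdbcUtilsExample {

    /** Binds one row, including a null column, using explicit SQL types so setNull() can be used. */
    public static void writeRow(Connection connection) throws SQLException {
        int[] typesArray = {Types.BIGINT, Types.VARCHAR, Types.TIMESTAMP};

        Row row = new Row(3);
        row.setField(0, 42L);
        row.setField(1, null);                       // safe here because Types.VARCHAR is known
        row.setField(2, new java.sql.Timestamp(System.currentTimeMillis()));

        try (PreparedStatement ps =
                connection.prepareStatement("UPSERT INTO T (ID, NAME, TS) VALUES (?, ?, ?)")) {
            JdbcUtils.setRecordToStatement(ps, typesArray, row);
            ps.executeUpdate();
        }
    }
}
```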
*/ -@Internal -public class PhoenixJdbcValidator extends ConnectorDescriptorValidator { - - public static final String CONNECTOR_TYPE_VALUE_JDBC = "phoenix"; - public static final String PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE = "phoenix.schema.isnamespacemappingenabled"; - public static final String PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE = "phoenix.schema.mapsystemtablestonamespace"; - - public static final String CONNECTOR_URL = "connector.url"; - public static final String CONNECTOR_TABLE = "connector.table"; - public static final String CONNECTOR_DRIVER = "connector.driver"; - public static final String CONNECTOR_USERNAME = "connector.username"; - public static final String CONNECTOR_PASSWORD = "connector.password"; - public static final String CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT = "connector.connection.max-retry-timeout"; - - public static final String CONNECTOR_READ_QUERY = "connector.read.query"; - public static final String CONNECTOR_READ_PARTITION_COLUMN = "connector.read.partition.column"; - public static final String CONNECTOR_READ_PARTITION_LOWER_BOUND = "connector.read.partition.lower-bound"; - public static final String CONNECTOR_READ_PARTITION_UPPER_BOUND = "connector.read.partition.upper-bound"; - public static final String CONNECTOR_READ_PARTITION_NUM = "connector.read.partition.num"; - public static final String CONNECTOR_READ_FETCH_SIZE = "connector.read.fetch-size"; - - public static final String CONNECTOR_LOOKUP_CACHE_MAX_ROWS = "connector.lookup.cache.max-rows"; - public static final String CONNECTOR_LOOKUP_CACHE_TTL = "connector.lookup.cache.ttl"; - public static final String CONNECTOR_LOOKUP_MAX_RETRIES = "connector.lookup.max-retries"; - - public static final String CONNECTOR_WRITE_FLUSH_MAX_ROWS = "connector.write.flush.max-rows"; - public static final String CONNECTOR_WRITE_FLUSH_INTERVAL = "connector.write.flush.interval"; - public static final String CONNECTOR_WRITE_MAX_RETRIES = "connector.write.max-retries"; - - @Override - public void validate(DescriptorProperties properties) { - super.validate(properties); - validateCommonProperties(properties); - validateReadProperties(properties); - validateLookupProperties(properties); - validateSinkProperties(properties); - } - - private void validateCommonProperties(DescriptorProperties properties) { - properties.validateString(CONNECTOR_URL, false, 1); - properties.validateString(CONNECTOR_TABLE, false, 1); - properties.validateString(CONNECTOR_DRIVER, true); - properties.validateString(CONNECTOR_USERNAME, true); - properties.validateString(CONNECTOR_PASSWORD, true); - properties.validateDuration(CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT, true, 1000); - - properties.validateString(PHOENIX_SCHEMA_NAMESPACE_MAPPING_ENABLE, true); - properties.validateString(PHOENIX_SCHEMA_MAP_SYSTEMTABLE_ENABLE, true); - - final String url = properties.getString(CONNECTOR_URL); - final Optional dialect = JdbcDialects.get(url); - Preconditions.checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + url); - - TableSchema schema = TableSchemaUtils.getPhysicalSchema(properties.getTableSchema(SCHEMA)); - dialect.get().validate(schema); - - Optional password = properties.getOptionalString(CONNECTOR_PASSWORD); - if (password.isPresent()) { - Preconditions.checkArgument( - properties.getOptionalString(CONNECTOR_USERNAME).isPresent(), - "Database username must be provided when database password is provided"); - } - } - - private void validateReadProperties(DescriptorProperties properties) { - 
properties.validateString(CONNECTOR_READ_QUERY, true); - properties.validateString(CONNECTOR_READ_PARTITION_COLUMN, true); - properties.validateLong(CONNECTOR_READ_PARTITION_LOWER_BOUND, true); - properties.validateLong(CONNECTOR_READ_PARTITION_UPPER_BOUND, true); - properties.validateInt(CONNECTOR_READ_PARTITION_NUM, true); - properties.validateInt(CONNECTOR_READ_FETCH_SIZE, true); - - Optional lowerBound = properties.getOptionalLong(CONNECTOR_READ_PARTITION_LOWER_BOUND); - Optional upperBound = properties.getOptionalLong(CONNECTOR_READ_PARTITION_UPPER_BOUND); - if (lowerBound.isPresent() && upperBound.isPresent()) { - Preconditions.checkArgument( - lowerBound.get() <= upperBound.get(), - CONNECTOR_READ_PARTITION_LOWER_BOUND - + " must not be larger than " - + CONNECTOR_READ_PARTITION_UPPER_BOUND); - } - - checkAllOrNone(properties, new String[] { - CONNECTOR_READ_PARTITION_COLUMN, - CONNECTOR_READ_PARTITION_LOWER_BOUND, - CONNECTOR_READ_PARTITION_UPPER_BOUND, - CONNECTOR_READ_PARTITION_NUM - }); - } - - private void validateLookupProperties(DescriptorProperties properties) { - properties.validateLong(CONNECTOR_LOOKUP_CACHE_MAX_ROWS, true); - properties.validateDuration(CONNECTOR_LOOKUP_CACHE_TTL, true, 1); - properties.validateInt(CONNECTOR_LOOKUP_MAX_RETRIES, true, 0); - - checkAllOrNone(properties, new String[] {CONNECTOR_LOOKUP_CACHE_MAX_ROWS, CONNECTOR_LOOKUP_CACHE_TTL}); - } - - private void validateSinkProperties(DescriptorProperties properties) { - properties.validateInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS, true); - properties.validateDuration(CONNECTOR_WRITE_FLUSH_INTERVAL, true, 1); - properties.validateInt(CONNECTOR_WRITE_MAX_RETRIES, true); - } - - private void checkAllOrNone(DescriptorProperties properties, String[] propertyNames) { - int presentCount = 0; - for (String name : propertyNames) { - if (properties.getOptionalString(name).isPresent()) { - presentCount++; - } - } - Preconditions.checkArgument( - presentCount == 0 || presentCount == propertyNames.length, - "Either all or none of the following properties should be provided:\n" - + String.join("\n", propertyNames)); - } -} diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory deleted file mode 100644 index 38fe58c929..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
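checkAllOrNone() above enforces that the four partition-scan keys are either all present or all absent, and validateReadProperties() additionally requires the lower bound not to exceed the upper bound. The same rules expressed as a tiny standalone check, with the property names copied from the validator and an example map that passes:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class PartitionPropertiesCheck {

    static final String[] PARTITION_KEYS = {
        "connector.read.partition.column",
        "connector.read.partition.lower-bound",
        "connector.read.partition.upper-bound",
        "connector.read.partition.num"
    };

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("connector.read.partition.column", "ID");
        props.put("connector.read.partition.lower-bound", "1");
        props.put("connector.read.partition.upper-bound", "1000");
        props.put("connector.read.partition.num", "4");

        long present = Arrays.stream(PARTITION_KEYS).filter(props::containsKey).count();
        if (present != 0 && present != PARTITION_KEYS.length) {
            throw new IllegalArgumentException(
                    "Either all or none of the partition properties must be provided");
        }

        long lower = Long.parseLong(props.get("connector.read.partition.lower-bound"));
        long upper = Long.parseLong(props.get("connector.read.partition.upper-bound"));
        if (lower > upper) {
            throw new IllegalArgumentException("lower-bound must not be larger than upper-bound");
        }
        System.out.println("partition read properties are consistent");
    }
}
```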
- -org.apache.flink.connector.phoenix.table.PhoenixDynamicTableFactory \ No newline at end of file diff --git a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory b/dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory deleted file mode 100644 index 922141c1e2..0000000000 --- a/dinky-connectors/dinky-connector-phoenix-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.flink.connector.phoenix.table.PhoenixTableSourceSinkFactory \ No newline at end of file diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/README.md b/dinky-connectors/dinky-connector-pulsar-1.14/README.md deleted file mode 100644 index efa0c8ae9a..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/README.md +++ /dev/null @@ -1,272 +0,0 @@ -## dinky-connector-pulsar - -> 概要说明: -> 实现依附:https://gitee.com/apache/flink/tree/release-1.14/flink-connectors -> * Flink 官方自1.14版本支持 Flink-pulsar-connector(目前未支持 Flink-sql) -> * 在此版本前,自主实现了Flink-pulsar-connector,本次Flink-sql的实现向官方Flink-connector-pulsar对齐,更好的兼容使用,实现性能最优! -> * 就生产经验,避坑处理 -> * 本次Pulsar版本使用版本:2.8.2 Flink版本:1.14.3 -> * Pulsar-connector应用广泛,在消息队列的使用中,FlinkSql的开发中具有总要作用意义。 - -## ★详情介绍 Pulsar-SQL Connector - -### Dependencies - -In order to use the Pulsar connector the following dependencies are required for both projects using a build automation tool (such as Maven or SBT) and SQL Client with SQL JAR bundles. - -* Maven dependency - -``` - - org.apache.flink - flink-connector-Pulsar_2.11 - 1.14.3 - -``` - -### How to create a Pulsar table - -``` -CREATE TABLE source_pulsar_n( - requestId VARCHAR, - `timestamp` BIGINT, - `date` VARCHAR, - appId VARCHAR, - appName VARCHAR, - forwardTimeMs VARCHAR, - processingTimeMs INT, - errCode VARCHAR, - userIp VARCHAR, - b_create_time as TO_TIMESTAMP(FROM_UNIXTIME(createTime/1000,'yyyy-MM-dd HH:mm:ss'),'yyyy-MM-dd HH:mm:ss') -) WITH ( - 'connector' = 'pulsar', - 'connector.version' = 'universal', - 'connector.topic' = 'persistent://dinky/dev/context.pulsar', - 'connector.service-url' = 'pulsar://pulsar-dinky-n.stream.com:6650', - 'connector.subscription-name' = 'tmp_print_detail', - 'connector.subscription-type' = 'Shared', - 'connector.subscription-initial-position' = 'Latest', - 'update-mode' = 'append', - 'format' = 'json', - 'format.derive-schema' = 'true' -); -``` - -### Data Type Mapping - -Pulsar stores message keys and values as bytes, so Pulsar doesn’t have schema or data types. The Pulsar messages are deserialized and serialized by formats, e.g. csv, json, avro. 
Thus, the data type mapping is determined by specific formats. Please refer to the Formats pages for more details. - -### Connector Options - -| Option | Required | Default | Type | Description | -|--------|----------|---------|------|-------------| -| connector | required | (none) | String | Specify what connector to use; for Pulsar use `'pulsar'`. | -| connector.version | required | (none) | String | universal | -| connector.topic | required for sink | (none) | String | Topic name(s) to read data from when the table is used as a source | -| connector.service-url | optional | (none) | String | The Pulsar service URL | -| connector.subscription-name | required | (none) | String | The Pulsar subscription name | -| connector.subscription-type | required | (none) | String | The Pulsar subscription mode [Shared, Exclusive, Key_Shared, Failover] | -| connector.subscription-initial-position | required | (none) | String | Initial position [EARLIEST, LATEST, TIMESTAMP] | -| update-mode | optional | (none) | String | append or upsert | -| format | optional | (none) | String | json, csv, ... | -| format.derive-schema | optional | (none) | String | true or false | - -## 🚀 Quick Start - -```shell -git clone https://github.com/DataLinkDC/dinky.git -cd dinky-connectors/dinky-connector-pulsar-1.14 -mvn clean install -DskipTests -Dflink.version=$version -``` - -## 🎉 Features - -* Key and Value Formats - -Both the key and value part of a Pulsar record can be serialized to and deserialized from raw bytes using one of the given formats. - -* Value Format - -Since a key is optional in Pulsar records, the following statement reads and writes records with a configured value format but without a key format. The 'format' option is a synonym for 'value.format'. All format options are prefixed with the format identifier.
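Because Pulsar stores keys and values as raw bytes, the 'format' option is what turns a payload into the declared columns. For a table created with 'format' = 'json', the per-column mapping is conceptually what this hand-written Jackson snippet does (an illustration only; in practice Flink's json format factory performs the conversion):

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.nio.charset.StandardCharsets;

public class JsonPayloadExample {

    public static void main(String[] args) throws Exception {
        // A raw Pulsar message value, as an upstream service might produce it.
        byte[] payload = ("{\"requestId\":\"r-001\",\"timestamp\":1700000000000,"
                + "\"appId\":\"app-1\",\"processingTimeMs\":15}").getBytes(StandardCharsets.UTF_8);

        // 'format' = 'json' does this per declared column: parse, pick the field, convert the type.
        JsonNode node = new ObjectMapper().readTree(payload);
        String requestId = node.get("requestId").asText();            // requestId VARCHAR
        long timestamp = node.get("timestamp").asLong();              // `timestamp` BIGINT
        int processingTimeMs = node.get("processingTimeMs").asInt();  // processingTimeMs INT

        System.out.printf("%s %d %d%n", requestId, timestamp, processingTimeMs);
    }
}
```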
- -## 👻 使用 - -```sql --- Pulsar多集群形式, --- 此处分 n、b 两个集群 - ---声明数据源 -CREATE TABLE source_pulsar_n( - requestId VARCHAR, - `timestamp` BIGINT, - `date` VARCHAR, - appId VARCHAR, - appName VARCHAR, - forwardTimeMs VARCHAR, - processingTimeMs INT, - errCode VARCHAR, - userIp VARCHAR, - createTime BIGINT, - b_create_time as TO_TIMESTAMP(FROM_UNIXTIME(createTime/1000,'yyyy-MM-dd HH:mm:ss'),'yyyy-MM-dd HH:mm:ss') -) WITH ( - 'connector' = 'pulsar', - 'connector.version' = 'universal', - 'connector.topic' = 'persistent://dinky/dev/context.pulsar', - 'connector.service-url' = 'pulsar://pulsar-dinky-n.stream.com:6650', - 'connector.subscription-name' = 'tmp_print_detail', - 'connector.subscription-type' = 'Shared', - 'connector.subscription-initial-position' = 'Latest', - 'update-mode' = 'append', - 'format' = 'json', - 'format.derive-schema' = 'true' -); - - -CREATE TABLE source_pulsar_b( - requestId VARCHAR, - `timestamp` BIGINT, - `date` VARCHAR, - appId VARCHAR, - appName VARCHAR, - forwardTimeMs VARCHAR, - processingTimeMs INT, - errCode VARCHAR, - userIp VARCHAR, - createTime BIGINT, - b_create_im_time as TO_TIMESTAMP(FROM_UNIXTIME(createTime/1000,'yyyy-MM-dd HH:mm:ss'),'yyyy-MM-dd HH:mm:ss') -) WITH ( - 'connector' = 'pulsar', - 'connector.version' = 'universal', - 'connector.topic' = 'persistent://dinky/dev/context.pulsar', - 'connector.service-url' = 'pulsar://pulsar-dinky-b.stream.com:6650', - 'connector.subscription-name' = 'tmp_print_detail', - 'connector.subscription-type' = 'Shared', - 'connector.subscription-initial-position' = 'Latest', - 'update-mode' = 'append', - 'format' = 'json', - 'format.derive-schema' = 'true' -); - --- 合并数据源 -create view pulsar_source_all AS -select - requestId , - `timestamp`, - `date`, - appId, - appName, - forwardTimeMs, - processingTim, - errCode, - userIp, - b_create_time -from source_pulsar_n -union all -select - requestId , - `timestamp`, - `date`, - appId, - appName, - forwardTimeMs, - processingTim, - errCode, - userIp, - b_create_time -from source_pulsar_b; - --- 创建 sink -create table sink_pulsar_result( - requestId VARCHAR, - `timestamp` BIGINT, - `date` VARCHAR, - appId VARCHAR, - appName VARCHAR, - forwardTimeMs VARCHAR, - processingTimeMs INT, - errCode VARCHAR, - userIp VARCHAR -) with ( - 'connector' = 'print' -); - --- 执行逻辑 --- 查看 pulsar主题明细数据 -insert into sink_pulsar_result -select - requestId , - `timestamp`, - `date`, - appId, - appName, - forwardTimeMs, - processingTim, - errCode, - userIp, - b_create_time -from pulsar_source_all; - -``` - -### 介绍 - -与Kafka对比 - -| 对比方面 | Kafka | Pulsar | -|------|--------------------------------------------------------|--------------------------------------------------------------------|------| -| 模型概念 | producer – topic – consumer group – consumer | producer – topic -subsciption- consumer | Stri | -| 消费模式 | 主要集中在流(Stream) 模式, 对单个partition是独占消费, 没有共享(Queue)的消费模式 | 提供了统一的消息模型和API. 流(Stream) 模式 – 独占和故障切换订阅方式 ; 队列(Queue)模式 – 共享订阅的方式 | -| 消息确认 | 使用偏移量 offset for sink | 使用专门的cursor管理. 累积确认和kafka效果一样; 提供单条或选择性确认 | -| 消息保留 | 根据设置的保留期来删除消息, 有可能消息没被消费, 过期后被删除, 不支持TTL | 消息只有被所有订阅消费后才会删除, 不会丢失数据,. 也运行设置保留期, 保留被消费的数据 . 支持TTL | - -根本区别:Apache Pulsar和Apache Kafka之间的根本区别在于Apache Kafka是以分区为存储中心,而Apache Pulsar是以Segment为存储中心 - -性能对比:Pulsar性能比Kafka强许多,速度是Kafka的五倍,延迟降低了40% - -### Pulsar补充介绍(消息体) - -消息队列的读写...... 
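The comparison above points out that Pulsar tracks consumption with a cursor and, unlike Kafka's offset-only model, supports selective per-message acknowledgment under a shared (queue-style) subscription. A minimal sketch with the plain Pulsar client illustrates this; the broker URL and topic are assumptions for illustration only.

```java
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.SubscriptionType;

public class SharedSubscriptionAckSketch {

    public static void main(String[] args) throws Exception {
        // Assumed broker URL and topic, for illustration only.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        Consumer<String> consumer = client.newConsumer(Schema.STRING)
                .topic("persistent://dinky/dev/context.pulsar")
                .subscriptionName("demo-shared-sub")
                .subscriptionType(SubscriptionType.Shared) // queue-style: messages are spread across consumers
                .subscribe();

        for (int i = 0; i < 10; i++) {
            Message<String> msg = consumer.receive();
            try {
                System.out.println("received: " + msg.getValue());
                consumer.acknowledge(msg);         // selective, per-message acknowledgment via the cursor
            } catch (Exception e) {
                consumer.negativeAcknowledge(msg); // only this message is scheduled for redelivery
            }
        }

        consumer.close();
        client.close();
    }
}
```

Within this module the Flink source handles acknowledgment itself (it sets `PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE` to `true` when building the `PulsarSource`), so this client-level pattern is only relevant for standalone consumers outside Flink.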
- -核心概念 -3.1 Messages(消息) - -#### Value / data payload: - -消息携带的数据,所有 Pulsar 的消息携带原始 bytes,但是消息数据也需要遵循数据 schemas。 - -#### Key: - -消息可以被 Key 打标签。这可以对 topic 压缩之类的事情起作用。 - -#### Properties: - -可选的,用户定义属性的 key/value map。 - -#### Producer name: - -生产消息的 producer 的名称(producer 被自动赋予默认名称,但你也可以自己指定。) - -#### Sequence ID: - -在 topic 中,每个 Pulsar 消息属于一个有序的序列。消息的 sequence ID 是它在序列中的次序。 - -#### Publish time: - -消息发布的时间戳 - -#### Event time: - -可选的时间戳,应用可以附在消息上,代表某个事件发生的时间,例如,消息被处理时。如果没有明确的设置,那么 event time 为0。 - -#### TypedMessageBuilder: - -它用于构造消息。您可以使用TypedMessageBuilder设置消息属性,比如消息键、消息值。设置TypedMessageBuilder时,将键设置为字符串。如果您将键设置为其他类型,例如,AVRO对象,则键将作为字节发送,并且很难从消费者处取回AVRO对象。 - -### Subscriptions(订阅模式) - -* 1 Exclusive(独占模式) -* 2 Failover(灾备模式) -* 3 Shared(共享模式) -* 4 Key_Shared(Key 共享模式) - diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/pom.xml b/dinky-connectors/dinky-connector-pulsar-1.14/pom.xml deleted file mode 100644 index c72301c58b..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/pom.xml +++ /dev/null @@ -1,209 +0,0 @@ - - - - 4.0.0 - - org.dinky - dinky-connectors - ${revision} - ../pom.xml - - dinky-connector-pulsar-1.14 - - jar - - Dinky : Connector : Pulsar 1.14 - - - 1.2.7 - 2.4.1 - 1.4.3 - 2.10.2 - - - - - org.dinky - dinky-common - ${scope.runtime} - - - - org.apache.pulsar - pulsar-client - ${pulsar.version} - - - com.google.protobuf - protobuf-java - - - - - - - org.apache.pulsar - pulsar-client-api - ${pulsar.version} - - - - org.apache.flink - flink-connector-pulsar_${scala.binary.version} - ${flink.version} - - - - org.apache.hadoop - hadoop-minicluster - ${hadoop.version} - test - - - - jdk.tools - jdk.tools - - - - io.netty - * - - - - - - org.apache.hadoop - hadoop-hdfs - ${hadoop.version} - test-jar - test - - - - org.apache.flink - flink-test-utils_${scala.binary.version} - ${flink.version} - test - - - org.dinky - dinky-flink-1.14 - ${scope.runtime} - - - org.apache.flink - flink-table-planner_${scala.binary.version} - ${flink.version} - test-jar - test - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.0 - - ${target.java.version} - ${target.java.version} - UTF-8 - - - - - org.apache.maven.plugins - maven-assembly-plugin - - - jar-with-dependencies - - - - - - org.apache.maven.plugins - maven-shade-plugin - 3.2.1 - - false - - - - - shade - - package - - - - *:* - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - - - - - - - - org.apache.maven.plugins - maven-antrun-plugin - 1.2 - - - copy-resources - - run - - package - - - - - - - - - - - - - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - ${project.parent.parent.basedir}/build/extends - - - - - diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSink.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSink.java deleted file mode 100644 index 125129e6d2..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSink.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.dinky.connector.pulsar; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.api.common.serialization.SerializationSchema; -import org.apache.flink.table.connector.ChangelogMode; -import org.apache.flink.table.connector.format.EncodingFormat; -import org.apache.flink.table.connector.sink.DynamicTableSink; -import org.apache.flink.table.connector.sink.SinkFunctionProvider; -import org.apache.flink.table.data.RowData; -import org.apache.flink.table.types.DataType; -import org.apache.flink.types.RowKind; - -import java.util.Collections; -import java.util.List; -import java.util.Properties; - -/** * @version 1.0 * @Desc: */ - -/** A version-agnostic Pulsar {@link DynamicTableSink}. */ -@Internal -public class PulsarDynamicSink implements DynamicTableSink { - - // -------------------------------------------------------------------------------------------- - // Mutable attributes - // -------------------------------------------------------------------------------------------- - - /** Metadata that is appended at the end of a physical sink row. */ - protected List metadataKeys; - - // -------------------------------------------------------------------------------------------- - // Format attributes - // -------------------------------------------------------------------------------------------- - - /** Data type of consumed data type. */ - protected DataType consumedDataType; - - /** Data type to configure the formats. */ - protected final DataType physicalDataType; - - /** Optional format for encoding to Pulsar. */ - protected final EncodingFormat> encodingFormat; - - // -------------------------------------------------------------------------------------------- - // Pulsar-specific attributes - // -------------------------------------------------------------------------------------------- - - /** The Pulsar topic to write to. */ - protected final String topic; - - /** The Pulsar service url config. */ - protected final String serviceUrl; - - /** The Pulsar update mode to. */ - protected final String updateMode; - - /** Properties for the Pulsar producer. */ - protected final Properties pulsarProducerProperties; - - /** Properties for the Pulsar producer. */ - protected final Properties pulsarClientProperties; - - /** Properties for the Pulsar producer parallelism. 
*/ - protected final Integer sinkParallelism; - - public PulsarDynamicSink( - DataType physicalDataType, - EncodingFormat> encodingFormat, - String topic, - String serviceUrl, - String updateMode, - Properties pulsarProducerProperties, - Properties pulsarClientProperties, - Integer sinkParallelism) { - // Format attributes - this.physicalDataType = checkNotNull(physicalDataType, "Physical data type must not be null."); - this.encodingFormat = encodingFormat; - // Mutable attributes - this.metadataKeys = Collections.emptyList(); - // Pulsar-specific attributes - this.topic = checkNotNull(topic, "Topic must not be null."); - this.serviceUrl = checkNotNull(serviceUrl, "Service url must not be null."); - this.updateMode = checkNotNull(updateMode, "Update mode must not be null."); - this.pulsarProducerProperties = - checkNotNull(pulsarProducerProperties, "pulsarProducerProperties must not be null."); - this.pulsarClientProperties = checkNotNull(pulsarClientProperties, "pulsarClientProperties must not be null."); - this.sinkParallelism = sinkParallelism; - } - - @Override - public ChangelogMode getChangelogMode(ChangelogMode requestedMode) { - if (updateMode.equals("append")) { - return ChangelogMode.newBuilder().addContainedKind(RowKind.INSERT).build(); - } else { - return ChangelogMode.newBuilder() - .addContainedKind(RowKind.INSERT) - .addContainedKind(RowKind.UPDATE_AFTER) - .build(); - } - } - - @Override - public SinkRuntimeProvider getSinkRuntimeProvider(Context context) { - SerializationSchema runtimeEncoder = encodingFormat.createRuntimeEncoder(context, physicalDataType); - - PulsarSinkFunction sinkFunction = new PulsarSinkFunction<>( - topic, serviceUrl, pulsarProducerProperties, pulsarClientProperties, runtimeEncoder); - // sink的并行度设置 - if (sinkParallelism != null) { - return SinkFunctionProvider.of(sinkFunction, sinkParallelism); - } else { - return SinkFunctionProvider.of(sinkFunction); - } - } - - @Override - public DynamicTableSink copy() { - final PulsarDynamicSink copy = new PulsarDynamicSink( - physicalDataType, - encodingFormat, - topic, - serviceUrl, - updateMode, - pulsarProducerProperties, - pulsarClientProperties, - sinkParallelism); - copy.metadataKeys = metadataKeys; - return copy; - } - - @Override - public String asSummaryString() { - return "Pulsar table sink"; - } -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSource.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSource.java deleted file mode 100644 index 77583bde90..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicSource.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.dinky.connector.pulsar; - -import static org.apache.flink.util.Preconditions.checkNotNull; - -import org.dinky.connector.pulsar.util.PulsarConnectorOptions; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.api.common.eventtime.WatermarkStrategy; -import org.apache.flink.api.common.serialization.DeserializationSchema; -import org.apache.flink.api.connector.source.Boundedness; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.connector.pulsar.source.PulsarSource; -import org.apache.flink.connector.pulsar.source.PulsarSourceBuilder; -import org.apache.flink.connector.pulsar.source.PulsarSourceOptions; -import org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor; -import org.apache.flink.connector.pulsar.source.reader.deserializer.PulsarDeserializationSchema; -import org.apache.flink.streaming.api.datastream.DataStream; -import org.apache.flink.streaming.api.datastream.DataStreamSource; -import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; -import org.apache.flink.table.api.TableException; -import org.apache.flink.table.connector.ChangelogMode; -import org.apache.flink.table.connector.format.DecodingFormat; -import org.apache.flink.table.connector.source.DataStreamScanProvider; -import org.apache.flink.table.connector.source.DynamicTableSource; -import org.apache.flink.table.connector.source.ScanTableSource; -import org.apache.flink.table.connector.source.abilities.SupportsWatermarkPushDown; -import org.apache.flink.table.data.RowData; -import org.apache.flink.table.types.DataType; -import org.apache.pulsar.client.api.SubscriptionType; - -import java.util.Map; -import java.util.Properties; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** * @version 1.0 * @Desc: */ - -/** A version-agnostic Pulsar {@link ScanTableSource}. */ -@Internal -public class PulsarDynamicSource implements ScanTableSource, SupportsWatermarkPushDown { - - private static final Logger LOG = LoggerFactory.getLogger(PulsarDynamicSource.class); - private final String serviceUrl; - private final String adminUrl; - private final String subscriptionName; - private final SubscriptionType subscriptionType; - private final PulsarConnectorOptions.ScanStartupMode startupMode; - private final String topic; - private final DecodingFormat> decodingFormat; - private final DataType producedDataType; - private final String tableIdentifier; - private final Properties properties; - private final Long timestamp; - private final Integer sourceParallelism; - - /** Watermark strategy that is used to generate per-partition watermark. 
*/ - protected WatermarkStrategy watermarkStrategy; - - public PulsarDynamicSource( - String serviceUrl, - String adminUrl, - String subscriptionName, - SubscriptionType subscriptionType, - PulsarConnectorOptions.ScanStartupMode startupMode, - Long timestamp, - String topic, - DecodingFormat> decodingFormat, - DataType producedDataType, - String tableIdentifier, - Properties properties, - Integer sourceParallelism) { - this.serviceUrl = serviceUrl; - this.adminUrl = adminUrl; - this.subscriptionName = subscriptionName; - this.subscriptionType = subscriptionType; - this.startupMode = startupMode; - this.timestamp = timestamp; - this.topic = topic; - this.decodingFormat = decodingFormat; - this.producedDataType = producedDataType; - this.tableIdentifier = tableIdentifier; - this.properties = properties; - this.sourceParallelism = sourceParallelism; - this.watermarkStrategy = null; - } - - @Override - public ChangelogMode getChangelogMode() { - // in our example the format decides about the changelog mode - // but it could also be the source itself - return decodingFormat.getChangelogMode(); - } - - @Override - public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) { - - // create runtime classes that are shipped to the cluster - - final DeserializationSchema deserializer = - decodingFormat.createRuntimeDecoder(runtimeProviderContext, producedDataType); - - final PulsarSource pulsarSource = createPulsarSource(deserializer); - - return new DataStreamScanProvider() { - @Override - public DataStream produceDataStream(StreamExecutionEnvironment execEnv) { - if (watermarkStrategy == null) { - LOG.info("WatermarkStrategy 为空"); - watermarkStrategy = WatermarkStrategy.noWatermarks(); - } else { - LOG.info("WatermarkStrategy 不为空"); - } - - DataStreamSource rowDataDataStreamSource = - execEnv.fromSource(pulsarSource, watermarkStrategy, "PulsarSource-" + tableIdentifier); - - // 设置source并行度 - if (sourceParallelism != null) { - rowDataDataStreamSource.setParallelism(sourceParallelism); - } - return rowDataDataStreamSource; - } - - @Override - public boolean isBounded() { - return pulsarSource.getBoundedness() == Boundedness.BOUNDED; - } - }; - } - - @Override - public DynamicTableSource copy() { - return new PulsarDynamicSource( - serviceUrl, - adminUrl, - subscriptionName, - subscriptionType, - startupMode, - timestamp, - topic, - decodingFormat, - producedDataType, - tableIdentifier, - properties, - sourceParallelism); - } - - @Override - public String asSummaryString() { - return "Pulsar Table Source"; - } - - // --------------------------------------------------------------------------------------------- - protected PulsarSource createPulsarSource(DeserializationSchema deserializer) { - - final PulsarSourceBuilder pulsarSourceBuilder = PulsarSource.builder(); - - pulsarSourceBuilder - .setConfig(PulsarSourceOptions.PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE, true) - .setServiceUrl(serviceUrl) - .setAdminUrl(adminUrl) - .setTopics(topic) - .setDeserializationSchema(PulsarDeserializationSchema.flinkSchema(deserializer)) - .setConfig(Configuration.fromMap((Map) properties)) - .setSubscriptionName(subscriptionName); - - switch (subscriptionType) { - case Shared: - pulsarSourceBuilder.setSubscriptionType(SubscriptionType.Shared); - break; - case Exclusive: - pulsarSourceBuilder.setSubscriptionType(SubscriptionType.Exclusive); - break; - case Key_Shared: - pulsarSourceBuilder.setSubscriptionType(SubscriptionType.Key_Shared); - break; - case Failover: - 
pulsarSourceBuilder.setSubscriptionType(SubscriptionType.Failover); - break; - default: - throw new TableException("Unsupported subscriptionType. Validator should have checked that."); - } - - switch (startupMode) { - case EARLIEST: - pulsarSourceBuilder.setStartCursor(StartCursor.earliest()); - break; - case LATEST: - pulsarSourceBuilder.setStartCursor(StartCursor.latest()); - break; - case TIMESTAMP: - checkNotNull(timestamp, "No timestamp supplied."); - pulsarSourceBuilder.setStartCursor(StartCursor.fromMessageTime(timestamp)); - break; - default: - throw new TableException("Unsupported startup mode. Validator should have checked that."); - } - - return pulsarSourceBuilder.build(); - } - - @Override - public void applyWatermark(WatermarkStrategy watermarkStrategy) { - this.watermarkStrategy = watermarkStrategy; - } -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicTableFactory.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicTableFactory.java deleted file mode 100644 index e258223eda..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarDynamicTableFactory.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.dinky.connector.pulsar; - -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.ADMIN_URL; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.DERIVE_SCHEMA; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.SERVICE_URL; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.SINK_PARALLELISM; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.SOURCE_PARALLELISM; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.SUBSCRIPTION_INITIAL_POSITION; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.SUBSCRIPTION_INITIAL_POSITION_TIMESTAMP; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.SUBSCRIPTION_NAME; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.SUBSCRIPTION_TYPE; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.ScanStartupMode; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.TOPIC; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.UPDATE_MODE; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptions.VERSION; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptionsUtil.PROPERTIES_CLIENT_PREFIX; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptionsUtil.PROPERTIES_PREFIX; -import static org.dinky.connector.pulsar.util.PulsarConnectorOptionsUtil.getPulsarProperties; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.api.common.serialization.DeserializationSchema; -import org.apache.flink.api.common.serialization.SerializationSchema; -import org.apache.flink.configuration.ConfigOption; -import org.apache.flink.configuration.ReadableConfig; -import org.apache.flink.table.api.ValidationException; -import org.apache.flink.table.catalog.CatalogTable; -import org.apache.flink.table.catalog.ObjectIdentifier; -import org.apache.flink.table.connector.format.DecodingFormat; -import org.apache.flink.table.connector.format.EncodingFormat; -import org.apache.flink.table.connector.format.Format; -import org.apache.flink.table.connector.sink.DynamicTableSink; -import org.apache.flink.table.connector.source.DynamicTableSource; -import org.apache.flink.table.data.RowData; -import org.apache.flink.table.factories.DeserializationFormatFactory; -import org.apache.flink.table.factories.DynamicTableSinkFactory; -import org.apache.flink.table.factories.DynamicTableSourceFactory; -import org.apache.flink.table.factories.FactoryUtil; -import org.apache.flink.table.factories.FactoryUtil.TableFactoryHelper; -import org.apache.flink.table.factories.SerializationFormatFactory; -import org.apache.flink.table.types.DataType; -import org.apache.pulsar.client.api.SubscriptionType; - -import java.util.HashSet; -import java.util.Properties; -import java.util.Set; - -import javax.annotation.Nullable; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Factory for creating configured instances of {@link PulsarDynamicSource} and { - * - *

* @version 1.0 * @Desc: - * - * @link PulsarDynamicSink}. - */ -@Internal -public class PulsarDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory { - - private static final Logger LOG = LoggerFactory.getLogger(PulsarDynamicTableFactory.class); - - public static final String IDENTIFIER = "pulsar"; - - @Override - public String factoryIdentifier() { - return IDENTIFIER; - } - - @Override - public Set> requiredOptions() { - final Set> options = new HashSet<>(); - options.add(SERVICE_URL); - return options; - } - - @Override - public Set> optionalOptions() { - final Set> options = new HashSet<>(); - options.add(ADMIN_URL); - options.add(SUBSCRIPTION_NAME); - options.add(SUBSCRIPTION_TYPE); - options.add(SUBSCRIPTION_INITIAL_POSITION); - options.add(SUBSCRIPTION_INITIAL_POSITION_TIMESTAMP); - options.add(FactoryUtil.FORMAT); - options.add(TOPIC); - options.add(UPDATE_MODE); - options.add(SOURCE_PARALLELISM); - options.add(SINK_PARALLELISM); - options.add(VERSION); - options.add(DERIVE_SCHEMA); - - return options; - } - - @Override - public DynamicTableSource createDynamicTableSource(Context context) { - // either implement your custom validation logic here ... - // or use the provided helper utility - final TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); - - // discover a suitable decoding format - final DecodingFormat> decodingFormat = - helper.discoverDecodingFormat(DeserializationFormatFactory.class, FactoryUtil.FORMAT); - - // validate all options - // helper.validate(); - helper.validateExcept(PROPERTIES_PREFIX, PROPERTIES_CLIENT_PREFIX); - - // get the validated options - final ReadableConfig tableOptions = helper.getOptions(); - final String serviceUrl = tableOptions.get(SERVICE_URL); - final String adminUrl = tableOptions.get(ADMIN_URL); - final String subscriptionName = tableOptions.get(SUBSCRIPTION_NAME); - final SubscriptionType subscriptionType = tableOptions.get(SUBSCRIPTION_TYPE); - final ScanStartupMode startupMode = tableOptions.get(SUBSCRIPTION_INITIAL_POSITION); - final Long timestamp = tableOptions.get(SUBSCRIPTION_INITIAL_POSITION_TIMESTAMP); - final String topic = tableOptions.get(TOPIC); - final Integer sourceParallelism = tableOptions.get(SOURCE_PARALLELISM); - - // derive the produced data type (excluding computed columns) from the catalog table - final DataType producedDataType = - context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType(); - - // create and return dynamic table source - return new PulsarDynamicSource( - serviceUrl, - adminUrl, - subscriptionName, - subscriptionType, - startupMode, - timestamp, - topic, - decodingFormat, - producedDataType, - context.getObjectIdentifier().asSummaryString(), - getPulsarProperties(context.getCatalogTable().getOptions(), PROPERTIES_PREFIX), - sourceParallelism); - } - - @Override - public DynamicTableSink createDynamicTableSink(Context context) { - final TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context); - - final ReadableConfig tableOptions = helper.getOptions(); - final String update_mode = tableOptions.get(UPDATE_MODE); - final Integer sinkParallelism = tableOptions.get(SINK_PARALLELISM); - - helper.validateExcept(PROPERTIES_PREFIX, PROPERTIES_CLIENT_PREFIX); - - final EncodingFormat> encodingFormat = - helper.discoverEncodingFormat(SerializationFormatFactory.class, FactoryUtil.FORMAT); - - // 校验sql建表时是否指定主键约束 - // 我们一般使用flink自动推导出来的主键,不显式设置主键约束,所以这个校验方法暂时不使用 - // validatePKConstraints(update_mode, 
context.getObjectIdentifier(), - // context.getCatalogTable(), encodingFormat); - - final DataType physicalDataType = context.getCatalogTable().getSchema().toPhysicalRowDataType(); - - return createPulsarTableSink( - physicalDataType, - encodingFormat, - tableOptions.get(TOPIC), - tableOptions.get(SERVICE_URL), - update_mode, - getPulsarProperties(context.getCatalogTable().getOptions(), PROPERTIES_PREFIX), - getPulsarProperties(context.getCatalogTable().getOptions(), PROPERTIES_CLIENT_PREFIX), - sinkParallelism); - } - - // 校验sql建表时是否指定主键约束 - private static void validatePKConstraints( - @Nullable String updateMode, ObjectIdentifier tableName, CatalogTable catalogTable, Format format) { - - if (!updateMode.equals("append") && !updateMode.equals("upsert")) { - throw new ValidationException(String.format( - "The Pulsar table '%s' with update-mode should be 'append' or 'upsert'", - tableName.asSummaryString())); - } else if (catalogTable.getSchema().getPrimaryKey().isPresent() && updateMode.equals("append")) { - throw new ValidationException(String.format( - "The Pulsar table '%s' with append update-mode doesn't support" - + " defining PRIMARY KEY constraint on the table, because it can't" - + " guarantee the semantic of primary key.", - tableName.asSummaryString())); - } else if (!catalogTable.getSchema().getPrimaryKey().isPresent() && updateMode.equals("upsert")) { - throw new ValidationException("'upsert' tables require to define a PRIMARY KEY constraint. The PRIMARY KEY" - + " specifies which columns should be read from or write to the Pulsar" - + " message key. The PRIMARY KEY also defines records in the 'upsert'" - + " table should update or delete on which keys."); - } - } - - protected PulsarDynamicSink createPulsarTableSink( - DataType physicalDataType, - @Nullable EncodingFormat> encodingFormat, - String topic, - String serviceUrl, - String updateMode, - Properties pulsarProducerProperties, - Properties pulsarClientProperties, - Integer sinkParallelism) { - return new PulsarDynamicSink( - physicalDataType, - encodingFormat, - topic, - serviceUrl, - updateMode, - pulsarProducerProperties, - pulsarClientProperties, - sinkParallelism); - } -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarSinkFunction.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarSinkFunction.java deleted file mode 100644 index 3ff8fa57ee..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/PulsarSinkFunction.java +++ /dev/null @@ -1,306 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.dinky.connector.pulsar; - -import org.dinky.connector.pulsar.util.PulsarConnectionHolder; -import org.dinky.connector.pulsar.util.PulsarProducerHolder; -import org.dinky.utils.JsonUtils; - -import org.apache.flink.annotation.Internal; -import org.apache.flink.api.common.functions.RuntimeContext; -import org.apache.flink.api.common.serialization.SerializationSchema; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.runtime.state.FunctionInitializationContext; -import org.apache.flink.runtime.state.FunctionSnapshotContext; -import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; -import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; -import org.apache.flink.streaming.api.operators.StreamingRuntimeContext; -import org.apache.flink.util.ExceptionUtils; -import org.apache.flink.util.SerializableObject; -import org.apache.pulsar.PulsarVersion; -import org.apache.pulsar.client.api.ClientBuilder; -import org.apache.pulsar.client.api.CompressionType; -import org.apache.pulsar.client.api.HashingScheme; -import org.apache.pulsar.client.api.MessageId; -import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.ProducerBuilder; -import org.apache.pulsar.client.api.PulsarClient; -import org.apache.pulsar.client.api.TypedMessageBuilder; -import org.apache.pulsar.client.impl.PulsarClientImpl; - -import java.io.IOException; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.CompletableFuture; -import java.util.function.BiConsumer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.fasterxml.jackson.databind.node.ObjectNode; - -/** - * The sink function for Pulsar. - * - *

* @version 1.0 * @Desc: - */ -@Internal -public class PulsarSinkFunction extends RichSinkFunction implements CheckpointedFunction { - - private static final long serialVersionUID = 1L; - private final Logger log = LoggerFactory.getLogger(PulsarSinkFunction.class); - - private final String topic; - private final String serviceUrl; - private final Properties pulsarProducerProperties; - private final Properties pulsarClientProperties; - private SerializationSchema runtimeEncoder; - private transient Producer producer; - private transient volatile boolean closed = false; - - /** - * Flag indicating whether to accept failures (and log them), or to fail on failures. Default is - * False. - */ - protected boolean logFailuresOnly; - - /** - * If true, the producer will wait until all outstanding records have been send to the broker. - * Default is True. - */ - protected boolean flushOnCheckpoint = true; - - /** The callback than handles error propagation or logging callbacks. */ - protected transient BiConsumer sendCallback; - - /** Errors encountered in the async producer are stored here. */ - protected transient volatile Exception asyncException; - - /** Lock for accessing the pending records. */ - protected final SerializableObject pendingRecordsLock = new SerializableObject(); - - /** Number of unacknowledged records. */ - protected long pendingRecords; - - public PulsarSinkFunction( - String topic, - String serviceUrl, - Properties pulsarProducerProperties, - Properties pulsarClientProperties, - SerializationSchema runtimeEncoder) { - this.topic = topic; - this.serviceUrl = serviceUrl; - this.pulsarProducerProperties = pulsarProducerProperties; - this.pulsarClientProperties = pulsarClientProperties; - this.runtimeEncoder = runtimeEncoder; - } - - @Override - public void open(Configuration parameters) throws Exception { - log.info("start open ..."); - try { - RuntimeContext ctx = getRuntimeContext(); - - log.info( - "Starting FlinkPulsarProducer ({}/{}) to produce into (※) pulsar topic {}", - ctx.getIndexOfThisSubtask() + 1, - ctx.getNumberOfParallelSubtasks(), - topic); - - this.producer = createReusedProducer(); - log.info("Pulsar producer has been created."); - - } catch (IOException ioe) { - log.error("Exception while creating connection to Pulsar.", ioe); - throw new RuntimeException("Cannot create connection to Pulsar.", ioe); - } catch (Exception ex) { - log.error("Exception while creating connection to Pulsar.", ex); - throw new RuntimeException("Cannot create connection to Pulsar.", ex); - } - - if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) { - log.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled." 
+ " Disabling flushing."); - flushOnCheckpoint = false; - } - - if (logFailuresOnly) { - this.sendCallback = (t, u) -> { - if (u != null) { - log.error("Error while sending message to Pulsar: {}", ExceptionUtils.stringifyException(u)); - } - acknowledgeMessage(); - }; - } else { - this.sendCallback = (t, u) -> { - if (asyncException == null && u != null) { - asyncException = new Exception(u); - } - acknowledgeMessage(); - }; - } - log.info("end open."); - } - - @Override - public void invoke(T value, Context context) throws Exception { - log.info("start to invoke, send pular message."); - - // propagate asynchronous errors - checkErroneous(); - - byte[] serializeValue = runtimeEncoder.serialize(value); - String strValue = new String(serializeValue); - TypedMessageBuilder typedMessageBuilder = producer.newMessage(); - typedMessageBuilder.value(serializeValue); - typedMessageBuilder.key(getKey(strValue)); - - if (flushOnCheckpoint) { - synchronized (pendingRecordsLock) { - pendingRecords++; - } - } - - // 异步发送 - CompletableFuture messageIdCompletableFuture = typedMessageBuilder.sendAsync(); - messageIdCompletableFuture.whenComplete(sendCallback); - } - - @Override - public void close() throws Exception { - // 采用pulsar producer复用的方式,close方法不要具体实现,否则producer会被关闭 - log.error("PulsarProducerBase Class close function called"); - checkErroneous(); - } - - @Override - public void snapshotState(FunctionSnapshotContext context) throws Exception { - if (flushOnCheckpoint) { - synchronized (pendingRecordsLock) { - if (pendingRecords != 0) { - try { - log.info("等待notify"); - pendingRecordsLock.wait(); - checkErroneous(); - flush(); - log.info("等待waite之后"); - } catch (InterruptedException e) { - // this can be interrupted when the Task has been cancelled. - // by throwing an exception, we ensure that this checkpoint doesn't get - // confirmed - throw new IllegalStateException("Flushing got interrupted while checkpointing", e); - } - } - } - } - } - - @Override - public void initializeState(FunctionInitializationContext context) throws Exception { - // nothing to do. - } - - public String getKey(String strValue) { - // JSONObject jsonObject = JSONObject.parseObject(strValue); - // JSONObject jsonObject = JSONUtil.parseObject(strValue); - // String key = jsonObject.getString("key"); - ObjectNode jsonNodes = JsonUtils.parseObject(strValue); - String key = String.valueOf(jsonNodes.get("key")); - return key == null ? 
"" : key; - } - - // 获取Pulsar Producer - public Producer createProducer() throws Exception { - log.info("current pulsar version is {}", PulsarVersion.getVersion()); - - ClientBuilder builder = PulsarClient.builder(); - ProducerBuilder producerBuilder = builder.serviceUrl(serviceUrl) - .maxNumberOfRejectedRequestPerConnection(50) - .loadConf((Map) pulsarClientProperties) - .build() - .newProducer() - .topic(topic) - .blockIfQueueFull(Boolean.TRUE) - .compressionType(CompressionType.LZ4) - .hashingScheme(HashingScheme.JavaStringHash) - // .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS) - .loadConf((Map) pulsarProducerProperties); // 实现配置透传功能 - Producer producer = producerBuilder.create(); - return producer; - } - - // 获取复用的Pulsar Producer - public Producer createReusedProducer() throws Exception { - log.info("now create client, serviceUrl is : {}", serviceUrl); - PulsarClientImpl client = PulsarConnectionHolder.getProducerClient(serviceUrl, pulsarClientProperties); - - log.info("current pulsar version is {} , topic is : {}", PulsarVersion.getVersion(), topic); - - return PulsarProducerHolder.getProducer(topic, pulsarProducerProperties, client); - } - - /** - * Defines whether the producer should fail on errors, or only log them. If this is set to true, - * then exceptions will be only logged, if set to false, exceptions will be eventually thrown - * and cause the streaming program to fail (and enter recovery). - * - * @param logFailuresOnly The flag to indicate logging-only on exceptions. - */ - public void setLogFailuresOnly(boolean logFailuresOnly) { - this.logFailuresOnly = logFailuresOnly; - } - - /** - * If set to true, the Flink producer will wait for all outstanding messages in the Pulsar - * buffers to be acknowledged by the Pulsar producer on a checkpoint. This way, the producer can - * guarantee that messages in the Pulsar buffers are part of the checkpoint. - * - * @param flush Flag indicating the flushing mode (true = flush on checkpoint) - */ - public void setFlushOnCheckpoint(boolean flush) { - this.flushOnCheckpoint = flush; - } - - protected void checkErroneous() throws Exception { - Exception e = asyncException; - if (e != null) { - // prevent double throwing - asyncException = null; - throw new Exception("Failed to send data to Pulsar: " + e.getMessage(), e); - } - } - - private void acknowledgeMessage() { - if (flushOnCheckpoint) { - synchronized (pendingRecordsLock) { - log.info("pendingRecords:{}", pendingRecords); - pendingRecords--; - if (pendingRecords == 0) { - pendingRecordsLock.notifyAll(); - log.info("notify完成"); - } - } - } - } - - /** Flush pending records. */ - protected void flush() throws Exception { - producer.flush(); - } -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectionHolder.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectionHolder.java deleted file mode 100644 index f97044b29b..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectionHolder.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.dinky.connector.pulsar.util; - -import org.apache.pulsar.client.api.PulsarClient; -import org.apache.pulsar.client.impl.PulsarClientImpl; - -import java.util.Map; -import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** * @version 1.0 * @Desc: */ -public class PulsarConnectionHolder { - private static final Logger LOG = LoggerFactory.getLogger(PulsarConnectionHolder.class); - private static final Map PULSAR_CLIENT_MAP = new ConcurrentHashMap<>(); - - public static PulsarClientImpl getConsumerClient(String serviceUrl, Properties properties) throws Exception { - return get(serviceUrl, true, properties); - } - - public static PulsarClientImpl getProducerClient(String serviceUrl, Properties properties) throws Exception { - return get(serviceUrl, false, properties); - } - - private static PulsarClientImpl get(String serviceUrl, boolean consumer, Properties properties) throws Exception { - synchronized (PulsarConnectionHolder.class) { - String pulsarClientCacheKey = getPulsarClientCacheKey(serviceUrl, consumer); - PulsarClientImpl pulsarClient = PULSAR_CLIENT_MAP.get(pulsarClientCacheKey); - if (null != pulsarClient) { - return pulsarClient; - } - - // return PULSAR_CLIENT_MAP.computeIfAbsent(pulsarClientCacheKey, serviceUrlTag -> - // createPulsarClient(serviceUrl)); - PulsarClientImpl pulsarClientImpl = createPulsarClient(serviceUrl, properties); - PulsarClientImpl newPulsarClientImpl = - PULSAR_CLIENT_MAP.putIfAbsent(pulsarClientCacheKey, pulsarClientImpl); - if (newPulsarClientImpl == null) { - return pulsarClientImpl; - } - return newPulsarClientImpl; - } - } - - private static String getPulsarClientCacheKey(String serviceUrl, boolean consumer) { - return serviceUrl + consumer; - } - - private static PulsarClientImpl createPulsarClient(String serviceUrl, Properties properties) { - try { - LOG.info("create client, and ID is " - + UUID.randomUUID() - + ", and cache map size is " - + PULSAR_CLIENT_MAP.size()); - - return (PulsarClientImpl) PulsarClient.builder() - .serviceUrl(serviceUrl) - .maxNumberOfRejectedRequestPerConnection(50) - .loadConf((Map) properties) - .build(); - } catch (Exception e) { - e.printStackTrace(); - throw new RuntimeException("创建PulsarClient失败", e); - } - } -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptions.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptions.java deleted file mode 100644 index e5f516cad7..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptions.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.dinky.connector.pulsar.util; - -import static org.apache.flink.configuration.description.TextElement.text; - -import org.apache.flink.annotation.PublicEvolving; -import org.apache.flink.configuration.ConfigOption; -import org.apache.flink.configuration.ConfigOptions; -import org.apache.flink.configuration.DescribedEnum; -import org.apache.flink.configuration.description.InlineElement; -import org.apache.pulsar.client.api.SubscriptionType; - -/** * @version 1.0 * @Desc: */ - -/** Options for the Pulsar connector. */ -@PublicEvolving -public class PulsarConnectorOptions { - - // -------------------------------------------------------------------------------------------- - // Format options - // -------------------------------------------------------------------------------------------- - public static final ConfigOption SERVICE_URL = ConfigOptions.key("connector.service-url") - .stringType() - .noDefaultValue() - .withDescription("Defines pulsar service url. "); - - public static final ConfigOption ADMIN_URL = ConfigOptions.key("connector.admin-url") - .stringType() - .defaultValue("http://pulsar-dinky-qa.dinky.com:8080") - .withDescription("Defines pulsar admin url. "); - - public static final ConfigOption TOPIC = ConfigOptions.key("connector.topic") - .stringType() - .noDefaultValue() - .withDescription("Defines pulsar topic. "); - - public static final ConfigOption SUBSCRIPTION_NAME = ConfigOptions.key("connector.subscription-name") - .stringType() - .noDefaultValue() - .withDescription("Defines pulsar subscription name. "); - - public static final ConfigOption SUBSCRIPTION_TYPE = ConfigOptions.key( - "connector.subscription-type") - .enumType(SubscriptionType.class) - .defaultValue(SubscriptionType.Shared) - .withDescription("Defines pulsar subscription type. "); - - public static final ConfigOption SUBSCRIPTION_INITIAL_POSITION = ConfigOptions.key( - "connector.subscription-initial-position") - .enumType(ScanStartupMode.class) - .defaultValue(ScanStartupMode.LATEST) - .withDescription("Startup mode for Pulsar consumer."); - - public static final ConfigOption SUBSCRIPTION_INITIAL_POSITION_TIMESTAMP = ConfigOptions.key( - "connector.subscription-initial-position.timestamp") - .longType() - .noDefaultValue() - .withDescription("Start from the specified message time by" + " Message.getPublishTime()."); - - public static final ConfigOption UPDATE_MODE = ConfigOptions.key("update-mode") - .stringType() - .noDefaultValue() - .withDescription("Defines pulsar update mode. "); - public static final ConfigOption SOURCE_PARALLELISM = ConfigOptions.key("source-parallelism") - .intType() - .noDefaultValue() - .withDescription("Defines pulsar sink parallelism. 
"); - public static final ConfigOption SINK_PARALLELISM = ConfigOptions.key("sink-parallelism") - .intType() - .noDefaultValue() - .withDescription("Defines pulsar sink parallelism. "); - - // 与老平台 1.14.3之前版本的sql进行兼容,但是并未使用的参数 - public static final ConfigOption VERSION = ConfigOptions.key("connector.version") - .stringType() - .noDefaultValue() - .withDescription("Defines pulsar version. "); - - // 与老平台 1.14.3之前版本的sql进行兼容,但是并未使用的参数 - public static final ConfigOption DERIVE_SCHEMA = ConfigOptions.key("format.derive-schema") - .stringType() - .noDefaultValue() - .withDescription("Defines pulsar derive schema. "); - - // -------------------------------------------------------------------------------------------- - // Enums - // -------------------------------------------------------------------------------------------- - - /** Startup mode for the Pulsar consumer, see {@link #SUBSCRIPTION_INITIAL_POSITION}. */ - public enum ScanStartupMode implements DescribedEnum { - EARLIEST("Earliest", text("Start from the earliest available message in the topic..")), - LATEST("Latest", text("Start from the latest available message in the topic.")), - TIMESTAMP("Timestamp", text("Start from the specified message time by Message.getPublishTime().")); - - private final String value; - private final InlineElement description; - - ScanStartupMode(String value, InlineElement description) { - this.value = value; - this.description = description; - } - - @Override - public String toString() { - return value; - } - - @Override - public InlineElement getDescription() { - return description; - } - } - - private PulsarConnectorOptions() {} -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptionsUtil.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptionsUtil.java deleted file mode 100644 index 3cb292bbd4..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarConnectorOptionsUtil.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.dinky.connector.pulsar.util; - -import org.apache.flink.annotation.PublicEvolving; - -import java.util.Map; -import java.util.Properties; - -/** * @version 1.0 * @Desc: */ - -/** Utilities for {@link PulsarConnectorOptions}. */ -@PublicEvolving -public class PulsarConnectorOptionsUtil { - - // Prefix for Pulsar specific properties. 
- public static final String PROPERTIES_PREFIX = "properties."; - public static final String PROPERTIES_CLIENT_PREFIX = "properties_client."; - - public static Properties getPulsarProperties(Map tableOptions, String prefix) { - final Properties pulsarProperties = new Properties(); - - if (hasPulsarClientProperties(tableOptions)) { - tableOptions.keySet().stream().filter(key -> key.startsWith(prefix)).forEach(key -> { - final String value = tableOptions.get(key); - final String subKey = key.substring((prefix).length()); - pulsarProperties.put(subKey, value); - }); - } - return pulsarProperties; - } - - /** - * Decides if the table options contains Pulsar client properties that start with prefix - * 'properties'. - */ - private static boolean hasPulsarClientProperties(Map tableOptions) { - return tableOptions.keySet().stream().anyMatch(k -> k.startsWith(PROPERTIES_PREFIX)); - } - - private PulsarConnectorOptionsUtil() {} -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarProducerHolder.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarProducerHolder.java deleted file mode 100644 index 79ff1eac0a..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/java/org/dinky/connector/pulsar/util/PulsarProducerHolder.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.dinky.connector.pulsar.util; - -import org.apache.pulsar.client.api.CompressionType; -import org.apache.pulsar.client.api.HashingScheme; -import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.ProducerBuilder; -import org.apache.pulsar.client.api.PulsarClient; - -import java.util.Map; -import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** * @version 1.0 * @Desc: */ -public class PulsarProducerHolder { - private static final Logger LOG = LoggerFactory.getLogger(PulsarProducerHolder.class); - private static final Map PULSAR_PRODUCER_MAP = new ConcurrentHashMap<>(); - - public static Producer getProducer(String defaultTopicName, Properties properties, PulsarClient client) - throws Exception { - return get(defaultTopicName, properties, client); - } - - private static Producer get(String defaultTopicName, Properties properties, PulsarClient client) throws Exception { - synchronized (PulsarProducerHolder.class) { - String pulsarProducerCacheKey = defaultTopicName; - Producer pulsarProducer = PULSAR_PRODUCER_MAP.get(pulsarProducerCacheKey); - LOG.info("get pulsarProducer from map result is " + pulsarProducer); - if (null != pulsarProducer) { - return pulsarProducer; - } - - Producer producer = createPulsarProducer(defaultTopicName, properties, client); - Producer newPulsarProducer = PULSAR_PRODUCER_MAP.putIfAbsent(pulsarProducerCacheKey, producer); - if (newPulsarProducer == null) { - return producer; - } - return newPulsarProducer; - } - } - - private static Producer createPulsarProducer(String defaultTopicName, Properties properties, PulsarClient client) { - try { - LOG.info("create producer, and ID is " - + UUID.randomUUID() - + ", and cache map size is " - + PULSAR_PRODUCER_MAP.size()); - LOG.info("now defaultTopicName is " - + defaultTopicName - + ", and map content is " - + PULSAR_PRODUCER_MAP.get(defaultTopicName)); - - ProducerBuilder producerBuilder = client.newProducer(); - producerBuilder - .blockIfQueueFull(Boolean.TRUE) - .compressionType(CompressionType.LZ4) - .topic(defaultTopicName) - .hashingScheme(HashingScheme.JavaStringHash) - . - // batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS). - loadConf((Map) properties); - Producer producer = producerBuilder.create(); - return producer; - } catch (Exception e) { - e.printStackTrace(); - throw new RuntimeException("创建Producer失败", e); - } - } -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/dinky-connectors/dinky-connector-pulsar-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory deleted file mode 100644 index 161404b5f6..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.dinky.connector.pulsar.PulsarDynamicTableFactory diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/test/java/org/dinky/connector/pulsar/PulsarSqlCase.java b/dinky-connectors/dinky-connector-pulsar-1.14/src/test/java/org/dinky/connector/pulsar/PulsarSqlCase.java deleted file mode 100644 index b55709d737..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/test/java/org/dinky/connector/pulsar/PulsarSqlCase.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.dinky.connector.pulsar; - -import org.apache.flink.table.api.EnvironmentSettings; -import org.apache.flink.table.api.TableEnvironment; -import org.apache.flink.table.api.TableResult; - -import org.junit.Test; - -/** @version 1.0 @Desc: Test case */ -public class PulsarSqlCase { - - @Test - public void testCase() { - - EnvironmentSettings settings = - EnvironmentSettings.newInstance().inStreamingMode().build(); - TableEnvironment tableEnvironment = TableEnvironment.create(settings); - - tableEnvironment.executeSql("create table source_gen_data(\n" - + " f_sequence INT,\n" - + " f_random INT,\n" - + " f_random_str STRING,\n" - + " ts AS localtimestamp,\n" - + " WATERMARK FOR ts AS ts\n" - + ") WITH (\n" - + " 'connector' = 'datagen',\n" - + " -- optional options --\n" - + " 'rows-per-second'='5',\n" - + " 'fields.f_sequence.kind'='sequence',\n" - + " 'fields.f_sequence.start'='1',\n" - + " 'fields.f_sequence.end'='1000',\n" - + " 'fields.f_random.min'='1',\n" - + " 'fields.f_random.max'='1000',\n" - + " 'fields.f_random_str.length'='10'\n" - + ")"); - - tableEnvironment.executeSql("create table sink_table(\n" - + " f_sequence INT,\n" - + " f_random INT,\n" - + " f_random_str STRING,\n" - + " ts string\n" - + ") with (\n" - + " 'connector' = 'print'\n" - + ")"); - - TableResult tableResult = tableEnvironment.executeSql("insert into sink_table\n" - + "select\n" - + " f_sequence ,\n" - + " f_random ,\n" - + " f_random_str ,\n" - + " cast(ts as string)\n" - + "from source_gen_data"); - - tableResult.print(); - } - - @Test - public void pulsarTest() throws Exception { - EnvironmentSettings settings = - EnvironmentSettings.newInstance().inStreamingMode().build(); - TableEnvironment tableEnvironment = TableEnvironment.create(settings); - - tableEnvironment.executeSql("CREATE TABLE source_pulsar(\n" - + " 
requestId VARCHAR,\n" - + " `timestamp` BIGINT,\n" - + " `date` VARCHAR,\n" - + " appId VARCHAR,\n" - + " appName VARCHAR,\n" - + " forwardTimeMs VARCHAR,\n" - + " processingTimeMs INT,\n" - + " errCode VARCHAR,\n" - + " userIp VARCHAR,\n" - + " createTime bigint,\n" - + " b_create_time as TO_TIMESTAMP(FROM_UNIXTIME(createTime/1000,'yyyy-MM-dd" - + " HH:mm:ss'),'yyyy-MM-dd HH:mm:ss')\n" - + ") WITH (\n" - + " 'connector' = 'pulsar',\n" - + " 'connector.version' = 'universal',\n" - + " 'connector.topic' = 'persistent://dinky/dev/context.pulsar',\n" - + " 'connector.service-url' = 'pulsar://pulsar-dinky-n.stream.com:6650',\n" - + " 'connector.subscription-name' = 'tmp_print_detail',\n" - + " 'connector.subscription-type' = 'Shared',\n" - + " 'connector.subscription-initial-position' = 'Latest',\n" - + " 'update-mode' = 'append',\n" - + " 'format' = 'json',\n" - + " 'format.derive-schema' = 'true'\n" - + ")"); - - tableEnvironment.executeSql("create table sink_pulsar_result(\n" - + " requestId VARCHAR,\n" - + " `timestamp` BIGINT,\n" - + " `date` VARCHAR,\n" - + " appId VARCHAR,\n" - + " appName VARCHAR,\n" - + " forwardTimeMs VARCHAR,\n" - + " processingTimeMs INT,\n" - + " errCode VARCHAR,\n" - + " userIp VARCHAR\n" - + ") with (\n" - + " 'connector' = 'print'\n" - + ")"); - - TableResult tableResult = tableEnvironment.executeSql("insert into sink_pulsar_result\n" - + "select \n" - + " requestId ,\n" - + " `timestamp`,\n" - + " `date`,\n" - + " appId,\n" - + " appName,\n" - + " forwardTimeMs,\n" - + " processingTimeMs,\n" - + " errCode,\n" - + " userIp\n" - + "from source_pulsar"); - - tableResult.print(); - } -} diff --git a/dinky-connectors/dinky-connector-pulsar-1.14/src/test/resources/META-INF/services/org.apache.flink.table.factories.Factory b/dinky-connectors/dinky-connector-pulsar-1.14/src/test/resources/META-INF/services/org.apache.flink.table.factories.Factory deleted file mode 100644 index 161404b5f6..0000000000 --- a/dinky-connectors/dinky-connector-pulsar-1.14/src/test/resources/META-INF/services/org.apache.flink.table.factories.Factory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.dinky.connector.pulsar.PulsarDynamicTableFactory diff --git a/dinky-connectors/pom.xml b/dinky-connectors/pom.xml index 41bad1935d..9f6fa7bbca 100644 --- a/dinky-connectors/pom.xml +++ b/dinky-connectors/pom.xml @@ -35,22 +35,13 @@ flink-1.14 dinky-connector-jdbc-1.14 - dinky-connector-phoenix-1.14 - dinky-connector-pulsar-1.14 - - flink-1.15 - - - flink-all dinky-connector-jdbc-1.14 - dinky-connector-phoenix-1.14 - dinky-connector-pulsar-1.14