diff --git a/.gitignore b/.gitignore index 3bb0e62122..587b74c703 100644 --- a/.gitignore +++ b/.gitignore @@ -62,4 +62,5 @@ tmp/* extends/* /.run/ -.idea \ No newline at end of file +.idea +*/tmp/* \ No newline at end of file diff --git a/dinky-client/dinky-client-1.14/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java b/dinky-client/dinky-client-1.14/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java index 22974fa224..10de93759f 100644 --- a/dinky-client/dinky-client-1.14/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java +++ b/dinky-client/dinky-client-1.14/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java @@ -19,6 +19,7 @@ package org.dinky.executor; +import org.dinky.data.exception.DinkyException; import org.dinky.data.model.LineageRel; import org.dinky.data.result.SqlExplainResult; import org.dinky.parser.CustomParserImpl; @@ -60,6 +61,7 @@ import org.apache.flink.table.operations.ModifyOperation; import org.apache.flink.table.operations.Operation; import org.apache.flink.table.operations.QueryOperation; +import org.apache.flink.table.operations.ddl.CreateTableASOperation; import org.apache.flink.table.planner.delegation.DefaultExecutor; import java.lang.reflect.Field; @@ -69,6 +71,10 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.stream.Collectors; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.fasterxml.jackson.databind.node.ObjectNode; @@ -82,6 +88,8 @@ */ @Slf4j public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { + private static final Logger log = LoggerFactory.getLogger(CustomTableEnvironmentImpl.class); + private final CustomExtendedOperationExecutorImpl extendedExecutor = new CustomExtendedOperationExecutorImpl(this); private static final String UNSUPPORTED_QUERY_IN_EXECUTE_SQL_MSG = "Unsupported SQL query! executeSql() only accepts a single SQL statement of type " @@ -91,6 +99,8 @@ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { + "CREATE VIEW, DROP VIEW, SHOW VIEWS, INSERT, DESCRIBE, LOAD MODULE, UNLOAD " + "MODULE, USE MODULES, SHOW [FULL] MODULES."; + private List modifyOperations = new ArrayList<>(); + public CustomTableEnvironmentImpl( CatalogManager catalogManager, ModuleManager moduleManager, @@ -199,29 +209,59 @@ private static Executor lookupExecutor( } } - @Override + public List getModifyOperations() { + return modifyOperations; + } + + public void addModifyOperations(ModifyOperation modifyOperation) { + if (modifyOperation instanceof CreateTableASOperation) { + CreateTableASOperation ctasOperation = (CreateTableASOperation) modifyOperation; + executeInternal(ctasOperation.getCreateTableOperation()); + modifyOperations.add(ctasOperation.getInsertOperation()); + } else { + modifyOperations.add(modifyOperation); + } + } + + public void addOperator(Transformation transformation) { + getStreamExecutionEnvironment().addOperator(transformation); + } + + public void clearModifyOperations() { + modifyOperations.clear(); + } + + public List> transOperatoinsToTransformation(List modifyOperations) { + return getPlanner().translate(modifyOperations); + } + public ObjectNode getStreamGraph(String statement) { List operations = super.getParser().parse(statement); if (operations.size() != 1) { throw new TableException("Unsupported SQL query! 
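Usage note: the operation-buffering API added here (addModifyOperations, getModifyOperations, clearModifyOperations, plus the no-arg getStreamGraph defined further down) is easiest to read end to end. A minimal sketch, assuming an already-constructed CustomTableEnvironmentImpl; the statement strings are placeholders, not part of this patch:

import java.util.List;

import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.table.operations.ModifyOperation;

import org.dinky.executor.CustomTableEnvironmentImpl;

public class StatementSetBufferSketch {
    // Buffer a batch of INSERT statements, then translate them as one job.
    public static StreamGraph bufferAndTranslate(CustomTableEnvironmentImpl tableEnv, List<String> inserts) {
        tableEnv.clearModifyOperations(); // start from an empty buffer
        for (String sql : inserts) {
            ModifyOperation op = tableEnv.getModifyOperationFromInsert(sql);
            if (op != null) { // SELECT statements come back as null and are skipped
                // CTAS is split: the table is created eagerly, only the insert is buffered
                tableEnv.addModifyOperations(op);
            }
        }
        return tableEnv.getStreamGraph(); // translates all buffered operations together
    }
}

For CTAS, addModifyOperations splits the statement: the target table is created eagerly via executeInternal and only the insert half is buffered, as the 1.14 implementation above shows.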
explainSql() only accepts a single SQL query."); - } else { - List modifyOperations = new ArrayList<>(); - for (int i = 0; i < operations.size(); i++) { - if (operations.get(i) instanceof ModifyOperation) { - modifyOperations.add((ModifyOperation) operations.get(i)); - } - } - List> trans = getPlanner().translate(modifyOperations); - if (executor instanceof DefaultExecutor) { - StreamGraph streamGraph = - ((DefaultExecutor) executor).getExecutionEnvironment().generateStreamGraph(trans); - JSONGenerator jsonGenerator = new JSONGenerator(streamGraph); - String json = jsonGenerator.getJSON(); - return JsonUtils.parseObject(json); - } else { - throw new TableException("Unsupported SQL query! explainSql() need a single SQL to query."); - } } + + List modifyOperations = operations.stream() + .filter(ModifyOperation.class::isInstance) + .map(ModifyOperation.class::cast) + .collect(Collectors.toList()); + + StreamGraph streamGraph = transOperatoinsToStreamGraph(modifyOperations); + JSONGenerator jsonGenerator = new JSONGenerator(streamGraph); + return JsonUtils.parseObject(jsonGenerator.getJSON()); + } + + private StreamGraph transOperatoinsToStreamGraph(List modifyOperations) { + List> trans = getPlanner().translate(modifyOperations); + final StreamExecutionEnvironment environment = getStreamExecutionEnvironment(); + trans.forEach(environment::addOperator); + + StreamGraph streamGraph = environment.getStreamGraph(); + final Configuration configuration = getConfig().getConfiguration(); + if (configuration.containsKey(PipelineOptions.NAME.key())) { + streamGraph.setJobName(configuration.getString(PipelineOptions.NAME)); + } + return streamGraph; } @Override @@ -281,31 +321,106 @@ public StreamGraph getStreamGraphFromInserts(List statements) { } @Override + public Operation getOperationFromStatement(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + return operations.get(0); + } + + public ModifyOperation getModifyOperationFromInsert(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + return (ModifyOperation) operation; + } else if (operation instanceof QueryOperation) { + log.info("Select statement is skipped."); + return null; + } else { + throw new TableException("Only insert or select statement is supported now."); + } + } + + public StreamGraph getStreamGraph() { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + List> trans = getPlanner().translate(modifyOperations); + for (Transformation transformation : trans) { + getStreamExecutionEnvironment().addOperator(transformation); + } + StreamGraph streamGraph = getStreamExecutionEnvironment().getStreamGraph(); + if (getConfig().getConfiguration().containsKey(PipelineOptions.NAME.key())) { + streamGraph.setJobName(getConfig().getConfiguration().getString(PipelineOptions.NAME)); + } + return streamGraph; + } + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... 
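Usage note: the rename guard inside transOperatoinsToStreamGraph only fires when pipeline.name was set explicitly; otherwise Flink's generated job name is kept. The same pattern in isolation, using only Flink classes this file already imports:

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.PipelineOptions;
import org.apache.flink.streaming.api.graph.StreamGraph;

final class JobNameSketch {
    // Mirror of the rename guard: keep Flink's generated job name unless
    // pipeline.name was set explicitly in the configuration.
    static void applyConfiguredJobName(Configuration configuration, StreamGraph streamGraph) {
        if (configuration.containsKey(PipelineOptions.NAME.key())) {
            streamGraph.setJobName(configuration.getString(PipelineOptions.NAME));
        }
    }
}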
extraDetails) { - SqlExplainResult record = new SqlExplainResult(); List operations = getParser().parse(statement); - record.setParseTrue(true); if (operations.size() != 1) { - throw new TableException("Unsupported SQL query! explainSql() only accepts a single SQL query."); + throw new DinkyException("Unsupported SQL explain! explainSql() only accepts a single SQL."); } - + SqlExplainResult record = new SqlExplainResult(); + if (operations.isEmpty()) { + throw new DinkyException("No statement is explained."); + } + record.setParseTrue(true); Operation operation = operations.get(0); if (operation instanceof ModifyOperation) { - record.setType("Modify DML"); + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(operations, extraDetails)); + record.setType("DML"); + } } else if (operation instanceof ExplainOperation) { - record.setType("Explain DML"); + record.setExplain(operation.asSummaryString()); + record.setType("Explain"); } else if (operation instanceof QueryOperation) { - record.setType("Query DML"); + record.setExplain(getPlanner().explain(operations, extraDetails)); + record.setType("DQL"); } else { record.setExplain(operation.asSummaryString()); record.setType("DDL"); } record.setExplainTrue(true); - if ("DDL".equals(record.getType())) { - // record.setExplain("DDL语句不进行解释。"); - return record; + return record; + } + + public SqlExplainResult explainModifyOperations( + List modifyOperations, ExplainDetail... extraDetails) { + SqlExplainResult record = new SqlExplainResult(); + if (modifyOperations.isEmpty()) { + throw new DinkyException("No modify operation is explained."); + } + record.setParseTrue(true); + if (modifyOperations.size() == 1) { + Operation operation = modifyOperations.get(0); + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(new ArrayList<>(modifyOperations), extraDetails)); + record.setType("DML"); + } + } else { + record.setExplain(getPlanner().explain(new ArrayList<>(modifyOperations), extraDetails)); + record.setType("Statement Set"); } - record.setExplain(getPlanner().explain(operations, extraDetails)); + record.setExplainTrue(true); return record; } diff --git a/dinky-client/dinky-client-1.15/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java b/dinky-client/dinky-client-1.15/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java index 9ca5fca07c..24e229585c 100644 --- a/dinky-client/dinky-client-1.15/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java +++ b/dinky-client/dinky-client-1.15/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java @@ -21,9 +21,11 @@ import static org.apache.flink.table.api.bridge.internal.AbstractStreamTableEnvironmentImpl.lookupExecutor; +import org.dinky.data.exception.DinkyException; import org.dinky.data.model.LineageRel; import org.dinky.data.result.SqlExplainResult; import org.dinky.parser.CustomParserImpl; +import org.dinky.utils.JsonUtils; import org.dinky.utils.LineageContext; import org.apache.flink.api.common.RuntimeExecutionMode; @@ -61,15 +63,18 @@ import org.apache.flink.table.operations.ModifyOperation; import org.apache.flink.table.operations.Operation; import org.apache.flink.table.operations.QueryOperation; +import org.apache.flink.table.operations.ddl.CreateTableASOperation; import java.lang.reflect.Field; import 
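Usage note: the reworked explainSqlRecord tags results as "CTAS", "DML", "Explain", "DQL", or "DDL" in place of the old "Modify DML"/"Query DML" labels, and always fills the explain text. A small caller sketch; the getter names on SqlExplainResult are assumptions mirroring the setters used in this patch:

import org.dinky.data.result.SqlExplainResult;
import org.dinky.executor.CustomTableEnvironmentImpl;

final class ExplainCallerSketch {
    static void printExplain(CustomTableEnvironmentImpl tableEnv, String sql) {
        SqlExplainResult result = tableEnv.explainSqlRecord(sql); // extraDetails is optional varargs
        // Possible types after this patch: "CTAS", "DML", "Explain", "DQL", "DDL".
        System.out.println(result.getType());    // getter assumed to mirror setType
        System.out.println(result.getExplain()); // getter assumed to mirror setExplain
    }
}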
java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.stream.Collectors; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import cn.hutool.core.collection.CollUtil; @@ -80,6 +85,8 @@ * @since 2022/05/08 */ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { + private static final Logger log = LoggerFactory.getLogger(CustomTableEnvironmentImpl.class); + private final CustomExtendedOperationExecutorImpl extendedExecutor = new CustomExtendedOperationExecutorImpl(this); private static final String UNSUPPORTED_QUERY_IN_EXECUTE_SQL_MSG = "Unsupported SQL query! executeSql() only accepts a single SQL statement of type " @@ -89,6 +96,8 @@ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { + "CREATE VIEW, DROP VIEW, SHOW VIEWS, INSERT, DESCRIBE, LOAD MODULE, UNLOAD " + "MODULE, USE MODULES, SHOW [FULL] MODULES."; + private List modifyOperations = new ArrayList<>(); + public CustomTableEnvironmentImpl( CatalogManager catalogManager, ModuleManager moduleManager, @@ -171,38 +180,59 @@ public static CustomTableEnvironmentImpl create( classLoader); } - @Override + public List getModifyOperations() { + return modifyOperations; + } + + public void addModifyOperations(ModifyOperation modifyOperation) { + if (modifyOperation instanceof CreateTableASOperation) { + CreateTableASOperation ctasOperation = (CreateTableASOperation) modifyOperation; + executeInternal(ctasOperation.getCreateTableOperation()); + modifyOperations.add(ctasOperation.toSinkModifyOperation(getCatalogManager())); + } else { + modifyOperations.add(modifyOperation); + } + } + + public void addOperator(Transformation transformation) { + getStreamExecutionEnvironment().addOperator(transformation); + } + + public void clearModifyOperations() { + modifyOperations.clear(); + } + + public List> transOperatoinsToTransformation(List modifyOperations) { + return getPlanner().translate(modifyOperations); + } + public ObjectNode getStreamGraph(String statement) { List operations = super.getParser().parse(statement); if (operations.size() != 1) { throw new TableException("Unsupported SQL query! 
explainSql() only accepts a single SQL query."); - } else { - List modifyOperations = new ArrayList<>(); - for (int i = 0; i < operations.size(); i++) { - if (operations.get(i) instanceof ModifyOperation) { - modifyOperations.add((ModifyOperation) operations.get(i)); - } - } - List> trans = getPlanner().translate(modifyOperations); - for (Transformation transformation : trans) { - getStreamExecutionEnvironment().addOperator(transformation); - } - StreamGraph streamGraph = getStreamExecutionEnvironment().getStreamGraph(); - if (getConfig().getConfiguration().containsKey(PipelineOptions.NAME.key())) { - streamGraph.setJobName(getConfig().getConfiguration().getString(PipelineOptions.NAME)); - } - JSONGenerator jsonGenerator = new JSONGenerator(streamGraph); - String json = jsonGenerator.getJSON(); - ObjectMapper mapper = new ObjectMapper(); - ObjectNode objectNode = mapper.createObjectNode(); - try { - objectNode = (ObjectNode) mapper.readTree(json); - } catch (JsonProcessingException e) { - e.printStackTrace(); - } finally { - return objectNode; - } } + + List modifyOperations = operations.stream() + .filter(ModifyOperation.class::isInstance) + .map(ModifyOperation.class::cast) + .collect(Collectors.toList()); + + StreamGraph streamGraph = transOperatoinsToStreamGraph(modifyOperations); + JSONGenerator jsonGenerator = new JSONGenerator(streamGraph); + return JsonUtils.parseObject(jsonGenerator.getJSON()); + } + + private StreamGraph transOperatoinsToStreamGraph(List modifyOperations) { + List> trans = getPlanner().translate(modifyOperations); + final StreamExecutionEnvironment environment = getStreamExecutionEnvironment(); + trans.forEach(environment::addOperator); + + StreamGraph streamGraph = environment.getStreamGraph(); + final Configuration configuration = getConfig().getConfiguration(); + if (configuration.containsKey(PipelineOptions.NAME.key())) { + streamGraph.setJobName(configuration.getString(PipelineOptions.NAME)); + } + return streamGraph; } @Override @@ -256,32 +286,50 @@ public StreamGraph getStreamGraphFromInserts(List statements) { return streamGraph; } - @Override - public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { - SqlExplainResult record = new SqlExplainResult(); + public Operation getOperationFromStatement(String statement) { List operations = getParser().parse(statement); - record.setParseTrue(true); - if (operations.size() != 1) { - throw new TableException("Unsupported SQL query! 
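Usage note: the 1.15 getStreamGraph refactor drops the hand-rolled ObjectMapper plus try/catch (including the return-inside-finally) in favor of a single JsonUtils.parseObject call. The resulting conversion, shown standalone:

import com.fasterxml.jackson.databind.node.ObjectNode;

import org.apache.flink.streaming.api.graph.JSONGenerator;
import org.apache.flink.streaming.api.graph.StreamGraph;

import org.dinky.utils.JsonUtils;

final class PlanJsonSketch {
    // Same shape as the refactored getStreamGraph(String): serialize the graph
    // with Flink's JSONGenerator, then parse the JSON once with JsonUtils.
    static ObjectNode toJsonPlan(StreamGraph streamGraph) {
        JSONGenerator jsonGenerator = new JSONGenerator(streamGraph);
        return JsonUtils.parseObject(jsonGenerator.getJSON());
    }
}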
explainSql() only accepts a single SQL query."); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + return operations.get(0); + } + + public ModifyOperation getModifyOperationFromInsert(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); } Operation operation = operations.get(0); if (operation instanceof ModifyOperation) { - record.setType("Modify DML"); - } else if (operation instanceof ExplainOperation) { - record.setType("Explain DML"); + return (ModifyOperation) operation; } else if (operation instanceof QueryOperation) { - record.setType("Query DML"); + log.info("Select statement is skipped."); + return null; } else { - record.setExplain(operation.asSummaryString()); - record.setType("DDL"); + throw new TableException("Only insert or select statement is supported now."); } - record.setExplainTrue(true); - if ("DDL".equals(record.getType())) { - // record.setExplain("DDL语句不进行解释。"); - return record; + } + + public StreamGraph getStreamGraph() { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + List> trans = getPlanner().translate(modifyOperations); + for (Transformation transformation : trans) { + getStreamExecutionEnvironment().addOperator(transformation); } - record.setExplain(getPlanner().explain(operations, extraDetails)); - return record; + StreamGraph streamGraph = getStreamExecutionEnvironment().getStreamGraph(); + if (getConfig().getConfiguration().containsKey(PipelineOptions.NAME.key())) { + streamGraph.setJobName(getConfig().getConfiguration().getString(PipelineOptions.NAME)); + } + return streamGraph; } @Override @@ -330,4 +378,61 @@ public TableResultInternal executeInternal(Operation operation) { } return super.executeInternal(operation); } + + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { + List operations = getParser().parse(statement); + if (operations.size() != 1) { + throw new DinkyException("Unsupported SQL explain! explainSql() only accepts a single SQL."); + } + SqlExplainResult record = new SqlExplainResult(); + if (operations.isEmpty()) { + throw new DinkyException("No statement is explained."); + } + record.setParseTrue(true); + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(operations, extraDetails)); + record.setType("DML"); + } + } else if (operation instanceof ExplainOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("Explain"); + } else if (operation instanceof QueryOperation) { + record.setExplain(getPlanner().explain(operations, extraDetails)); + record.setType("DQL"); + } else { + record.setExplain(operation.asSummaryString()); + record.setType("DDL"); + } + record.setExplainTrue(true); + return record; + } + + public SqlExplainResult explainModifyOperations( + List modifyOperations, ExplainDetail... 
extraDetails) { + SqlExplainResult record = new SqlExplainResult(); + if (modifyOperations.isEmpty()) { + throw new DinkyException("No modify operation is explained."); + } + record.setParseTrue(true); + if (modifyOperations.size() == 1) { + Operation operation = modifyOperations.get(0); + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(new ArrayList<>(modifyOperations), extraDetails)); + record.setType("DML"); + } + } else { + record.setExplain(getPlanner().explain(new ArrayList<>(modifyOperations), extraDetails)); + record.setType("Statement Set"); + } + record.setExplainTrue(true); + return record; + } } diff --git a/dinky-client/dinky-client-1.16/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java b/dinky-client/dinky-client-1.16/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java index c03380a8ec..88999ac90d 100644 --- a/dinky-client/dinky-client-1.16/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java +++ b/dinky-client/dinky-client-1.16/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java @@ -19,6 +19,7 @@ package org.dinky.executor; +import org.dinky.data.exception.DinkyException; import org.dinky.data.result.SqlExplainResult; import org.dinky.parser.CustomParserImpl; import org.dinky.utils.JsonUtils; @@ -35,6 +36,7 @@ import org.apache.flink.table.api.ExplainDetail; import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.operations.CreateTableASOperation; import org.apache.flink.table.operations.ExplainOperation; import org.apache.flink.table.operations.ModifyOperation; import org.apache.flink.table.operations.Operation; @@ -58,6 +60,8 @@ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { private static final Logger log = LoggerFactory.getLogger(CustomTableEnvironmentImpl.class); + private List modifyOperations = new ArrayList<>(); + public CustomTableEnvironmentImpl(StreamTableEnvironment streamTableEnvironment) { super(streamTableEnvironment); injectParser(new CustomParserImpl(getPlanner().getParser())); @@ -88,6 +92,32 @@ public static CustomTableEnvironmentImpl create( return new CustomTableEnvironmentImpl(streamTableEnvironment); } + public List getModifyOperations() { + return modifyOperations; + } + + public void addModifyOperations(ModifyOperation modifyOperation) { + if (modifyOperation instanceof CreateTableASOperation) { + CreateTableASOperation ctasOperation = (CreateTableASOperation) modifyOperation; + executeInternal(ctasOperation.getCreateTableOperation()); + modifyOperations.add(ctasOperation.toSinkModifyOperation(getCatalogManager())); + } else { + modifyOperations.add(modifyOperation); + } + } + + public void addOperator(Transformation transformation) { + getStreamExecutionEnvironment().addOperator(transformation); + } + + public void clearModifyOperations() { + modifyOperations.clear(); + } + + public List> transOperatoinsToTransformation(List modifyOperations) { + return getPlanner().translate(modifyOperations); + } + public ObjectNode getStreamGraph(String statement) { List operations = super.getParser().parse(statement); if (operations.size() != 1) { @@ -139,32 +169,98 @@ public StreamGraph getStreamGraphFromInserts(List statements) { return transOperatoinsToStreamGraph(modifyOperations); } - public SqlExplainResult explainSqlRecord(String statement, 
ExplainDetail... extraDetails) { + public Operation getOperationFromStatement(String statement) { List operations = getParser().parse(statement); - if (operations.size() != 1) { - throw new TableException("Unsupported SQL query! explainSql() only accepts a single SQL query."); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + return operations.get(0); + } + public ModifyOperation getModifyOperationFromInsert(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + return (ModifyOperation) operation; + } else if (operation instanceof QueryOperation) { + log.info("Select statement is skipped."); + return null; + } else { + throw new TableException("Only insert or select statement is supported now."); + } + } + + public StreamGraph getStreamGraph() { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { + List operations = getParser().parse(statement); + if (operations.size() != 1) { + throw new DinkyException("Unsupported SQL explain! explainSql() only accepts a single SQL."); + } SqlExplainResult record = new SqlExplainResult(); + if (operations.isEmpty()) { + throw new DinkyException("No statement is explained."); + } record.setParseTrue(true); - record.setExplainTrue(true); - + Operation operation = operations.get(0); if (operation instanceof ModifyOperation) { - record.setType("Modify DML"); + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(operations, extraDetails)); + record.setType("DML"); + } } else if (operation instanceof ExplainOperation) { - record.setType("Explain DML"); + record.setExplain(operation.asSummaryString()); + record.setType("Explain"); } else if (operation instanceof QueryOperation) { - record.setType("Query DML"); + record.setExplain(getPlanner().explain(operations, extraDetails)); + record.setType("DQL"); } else { record.setExplain(operation.asSummaryString()); record.setType("DDL"); - - // record.setExplain("DDL statement needn't comment。"); - return record; } + record.setExplainTrue(true); + return record; + } - record.setExplain(getPlanner().explain(operations, extraDetails)); + public SqlExplainResult explainModifyOperations( + List modifyOperations, ExplainDetail... 
extraDetails) { + SqlExplainResult record = new SqlExplainResult(); + if (modifyOperations.isEmpty()) { + throw new DinkyException("No modify operation is explained."); + } + record.setParseTrue(true); + if (modifyOperations.size() == 1) { + Operation operation = modifyOperations.get(0); + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(new ArrayList<>(modifyOperations), extraDetails)); + record.setType("DML"); + } + } else { + record.setExplain(getPlanner().explain(new ArrayList<>(modifyOperations), extraDetails)); + record.setType("Statement Set"); + } + record.setExplainTrue(true); return record; } } diff --git a/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java b/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java index d4a02dea0f..8a7adfc800 100644 --- a/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java +++ b/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java @@ -20,23 +20,15 @@ package org.dinky.executor; import org.dinky.data.model.LineageRel; -import org.dinky.data.result.SqlExplainResult; import org.dinky.utils.LineageContext; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; -import org.apache.flink.table.api.ExplainDetail; -import org.apache.flink.table.api.ExplainFormat; import org.apache.flink.table.api.TableEnvironment; -import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl; import org.apache.flink.table.delegation.ExtendedOperationExecutor; import org.apache.flink.table.delegation.Planner; -import org.apache.flink.table.operations.ExplainOperation; -import org.apache.flink.table.operations.ModifyOperation; -import org.apache.flink.table.operations.Operation; -import org.apache.flink.table.operations.QueryOperation; import org.apache.flink.table.planner.delegation.PlannerBase; import java.util.List; @@ -93,36 +85,6 @@ public Configuration getRootConfiguration() { return (Configuration) this.getConfig().getRootConfiguration(); } - @Override - public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { - List operations = getParser().parse(statement); - if (operations.size() != 1) { - throw new TableException("Unsupported SQL query! 
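Usage note: explainModifyOperations pairs with the operation buffer: a single buffered operation explains as "DML" or "CTAS", while two or more explain together as a "Statement Set". A sketch assuming the buffer was filled beforehand:

import java.util.List;

import org.apache.flink.table.operations.ModifyOperation;

import org.dinky.data.result.SqlExplainResult;
import org.dinky.executor.CustomTableEnvironmentImpl;

final class BufferedExplainSketch {
    // One buffered operation explains as "DML" (or "CTAS"); two or more
    // explain together as a "Statement Set".
    static SqlExplainResult explainBuffered(CustomTableEnvironmentImpl tableEnv) {
        List<ModifyOperation> buffered = tableEnv.getModifyOperations();
        return tableEnv.explainModifyOperations(buffered);
    }
}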
explainSql() only accepts a single SQL query."); - } - - Operation operation = operations.get(0); - SqlExplainResult data = new SqlExplainResult(); - data.setParseTrue(true); - data.setExplainTrue(true); - - if (operation instanceof ModifyOperation) { - data.setType("Modify DML"); - } else if (operation instanceof ExplainOperation) { - data.setType("Explain DML"); - } else if (operation instanceof QueryOperation) { - data.setType("Query DML"); - } else { - data.setExplain(operation.asSummaryString()); - data.setType("DDL"); - - // data.setExplain("DDL statement needn't comment。"); - return data; - } - - data.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); - return data; - } - @Override public List getLineage(String statement) { LineageContext lineageContext = new LineageContext(this); diff --git a/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java b/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java index 8ee3256aa7..306850b706 100644 --- a/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java +++ b/dinky-client/dinky-client-1.17/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java @@ -19,6 +19,8 @@ package org.dinky.executor; +import org.dinky.data.exception.DinkyException; +import org.dinky.data.result.SqlExplainResult; import org.dinky.parser.CustomParserImpl; import org.apache.flink.api.dag.Transformation; @@ -30,10 +32,15 @@ import org.apache.flink.streaming.api.graph.JSONGenerator; import org.apache.flink.streaming.api.graph.StreamGraph; import org.apache.flink.table.api.EnvironmentSettings; +import org.apache.flink.table.api.ExplainDetail; +import org.apache.flink.table.api.ExplainFormat; import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.operations.CreateTableASOperation; +import org.apache.flink.table.operations.ExplainOperation; import org.apache.flink.table.operations.ModifyOperation; import org.apache.flink.table.operations.Operation; +import org.apache.flink.table.operations.QueryOperation; import java.util.ArrayList; import java.util.List; @@ -57,6 +64,8 @@ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { private static final ObjectMapper mapper = new ObjectMapper(); + private List modifyOperations = new ArrayList<>(); + public CustomTableEnvironmentImpl(StreamTableEnvironment streamTableEnvironment) { super(streamTableEnvironment); injectParser(new CustomParserImpl(getPlanner().getParser())); @@ -87,6 +96,32 @@ public static CustomTableEnvironmentImpl create( return new CustomTableEnvironmentImpl(streamTableEnvironment); } + public List getModifyOperations() { + return modifyOperations; + } + + public void addModifyOperations(ModifyOperation modifyOperation) { + if (modifyOperation instanceof CreateTableASOperation) { + CreateTableASOperation ctasOperation = (CreateTableASOperation) modifyOperation; + executeInternal(ctasOperation.getCreateTableOperation()); + modifyOperations.add(ctasOperation.toSinkModifyOperation(getCatalogManager())); + } else { + modifyOperations.add(modifyOperation); + } + } + + public void addOperator(Transformation transformation) { + getStreamExecutionEnvironment().addOperator(transformation); + } + + public void clearModifyOperations() { + modifyOperations.clear(); + } + + public List> transOperatoinsToTransformation(List modifyOperations) { + 
return getPlanner().translate(modifyOperations); + } + @Override public ObjectNode getStreamGraph(String statement) { List operations = super.getParser().parse(statement); @@ -144,4 +179,101 @@ public StreamGraph getStreamGraphFromInserts(List statements) { return transOperatoinsToStreamGraph(modifyOperations); } + + public Operation getOperationFromStatement(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + return operations.get(0); + } + + public ModifyOperation getModifyOperationFromInsert(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + return (ModifyOperation) operation; + } else if (operation instanceof QueryOperation) { + log.info("Select statement is skipped."); + return null; + } else { + throw new TableException("Only insert or select statement is supported now."); + } + } + + public StreamGraph getStreamGraph() { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { + List operations = getParser().parse(statement); + if (operations.size() != 1) { + throw new DinkyException("Unsupported SQL explain! explainSql() only accepts a single SQL."); + } + SqlExplainResult record = new SqlExplainResult(); + if (operations.isEmpty()) { + throw new DinkyException("No statement is explained."); + } + record.setParseTrue(true); + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else if (operation instanceof ExplainOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("Explain"); + } else if (operation instanceof QueryOperation) { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DQL"); + } else { + record.setExplain(operation.asSummaryString()); + record.setType("DDL"); + } + record.setExplainTrue(true); + return record; + } + + public SqlExplainResult explainModifyOperations( + List modifyOperations, ExplainDetail... 
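Usage note: getModifyOperationFromInsert has a three-way contract: a ModifyOperation for INSERT-like statements, null for SELECTs (logged and skipped), and a TableException for anything else. A defensive caller might look like this sketch:

import org.apache.flink.table.operations.ModifyOperation;

import org.dinky.executor.CustomTableEnvironmentImpl;

final class InsertParseSketch {
    // Returns true when the statement was buffered, false when it was a
    // SELECT; any other statement kind makes the parser throw TableException.
    static boolean bufferIfInsert(CustomTableEnvironmentImpl tableEnv, String sql) {
        ModifyOperation op = tableEnv.getModifyOperationFromInsert(sql);
        if (op == null) {
            return false; // SELECT: logged and skipped by the environment
        }
        tableEnv.addModifyOperations(op);
        return true;
    }
}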
extraDetails) { + SqlExplainResult record = new SqlExplainResult(); + if (modifyOperations.isEmpty()) { + throw new DinkyException("No modify operation is explained."); + } + record.setParseTrue(true); + if (modifyOperations.size() == 1) { + Operation operation = modifyOperations.get(0); + if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("Statement Set"); + } + record.setExplainTrue(true); + return record; + } } diff --git a/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java b/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java index 6414271715..a3f0f4beb6 100644 --- a/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java +++ b/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java @@ -20,23 +20,17 @@ package org.dinky.executor; import org.dinky.data.model.LineageRel; -import org.dinky.data.result.SqlExplainResult; import org.dinky.utils.LineageContext; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; -import org.apache.flink.table.api.ExplainDetail; -import org.apache.flink.table.api.ExplainFormat; import org.apache.flink.table.api.TableEnvironment; -import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl; import org.apache.flink.table.delegation.Planner; -import org.apache.flink.table.operations.ExplainOperation; import org.apache.flink.table.operations.ModifyOperation; -import org.apache.flink.table.operations.Operation; -import org.apache.flink.table.operations.QueryOperation; +import java.util.ArrayList; import java.util.List; import cn.hutool.core.util.ReflectUtil; @@ -85,36 +79,6 @@ public Configuration getRootConfiguration() { return (Configuration) this.getConfig().getRootConfiguration(); } - @Override - public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { - List operations = getParser().parse(statement); - if (operations.size() != 1) { - throw new TableException("Unsupported SQL query! 
explainSql() only accepts a single SQL query."); - } - - Operation operation = operations.get(0); - SqlExplainResult data = new SqlExplainResult(); - data.setParseTrue(true); - data.setExplainTrue(true); - - if (operation instanceof ModifyOperation) { - data.setType("Modify DML"); - } else if (operation instanceof ExplainOperation) { - data.setType("Explain DML"); - } else if (operation instanceof QueryOperation) { - data.setType("Query DML"); - } else { - data.setExplain(operation.asSummaryString()); - data.setType("DDL"); - - // data.setExplain("DDL statement needn't comment。"); - return data; - } - - data.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); - return data; - } - @Override public List getLineage(String statement) { LineageContext lineageContext = new LineageContext(this); diff --git a/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java b/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java index 530f772c6d..4ba6a2c523 100644 --- a/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java +++ b/dinky-client/dinky-client-1.18/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java @@ -19,10 +19,14 @@ package org.dinky.executor; +import org.dinky.data.exception.DinkyException; +import org.dinky.data.result.SqlExplainResult; import org.dinky.operations.CustomNewParserImpl; +import org.apache.flink.api.common.RuntimeExecutionMode; import org.apache.flink.api.dag.Transformation; import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.ExecutionOptions; import org.apache.flink.configuration.PipelineOptions; import org.apache.flink.runtime.jobgraph.jsonplan.JsonPlanGenerator; import org.apache.flink.runtime.rest.messages.JobPlanInfo; @@ -30,14 +34,38 @@ import org.apache.flink.streaming.api.graph.JSONGenerator; import org.apache.flink.streaming.api.graph.StreamGraph; import org.apache.flink.table.api.EnvironmentSettings; +import org.apache.flink.table.api.ExplainDetail; +import org.apache.flink.table.api.ExplainFormat; +import org.apache.flink.table.api.TableConfig; import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.config.TableConfigOptions; +import org.apache.flink.table.catalog.Catalog; import org.apache.flink.table.catalog.CatalogDescriptor; +import org.apache.flink.table.catalog.ContextResolvedTable; +import org.apache.flink.table.catalog.ObjectIdentifier; +import org.apache.flink.table.catalog.ResolvedCatalogTable; +import org.apache.flink.table.catalog.StagedTable; +import org.apache.flink.table.connector.sink.DynamicTableSink; +import org.apache.flink.table.connector.sink.SinkStagingContext; +import org.apache.flink.table.connector.sink.abilities.SupportsStaging; +import org.apache.flink.table.execution.StagingSinkJobStatusHook; +import org.apache.flink.table.factories.TableFactoryUtil; +import org.apache.flink.table.module.Module; +import org.apache.flink.table.module.ModuleManager; +import org.apache.flink.table.operations.CreateTableASOperation; +import org.apache.flink.table.operations.ExplainOperation; import org.apache.flink.table.operations.ModifyOperation; import org.apache.flink.table.operations.Operation; +import org.apache.flink.table.operations.QueryOperation; +import org.apache.flink.table.operations.ReplaceTableAsOperation; +import 
org.apache.flink.table.operations.ddl.CreateTableOperation; +import org.apache.flink.table.operations.utils.ExecutableOperationUtils; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; import org.slf4j.Logger; @@ -58,6 +86,8 @@ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { private static final ObjectMapper mapper = new ObjectMapper(); + private List modifyOperations = new ArrayList<>(); + public CustomTableEnvironmentImpl(StreamTableEnvironment streamTableEnvironment) { super(streamTableEnvironment); injectParser(new CustomNewParserImpl(this, getPlanner().getParser())); @@ -87,6 +117,134 @@ public static CustomTableEnvironmentImpl create( return new CustomTableEnvironmentImpl(streamTableEnvironment); } + public List getModifyOperations() { + return modifyOperations; + } + + public void addModifyOperations(ModifyOperation modifyOperation) { + if (modifyOperation instanceof CreateTableASOperation) { + modifyOperations.add(getModifyOperation((CreateTableASOperation) modifyOperation)); + } else if (modifyOperation instanceof ReplaceTableAsOperation) { + modifyOperations.add(getModifyOperation((ReplaceTableAsOperation) modifyOperation)); + } else { + modifyOperations.add(modifyOperation); + } + } + + private ModifyOperation getModifyOperation(CreateTableASOperation ctasOperation) { + CreateTableOperation createTableOperation = ctasOperation.getCreateTableOperation(); + ObjectIdentifier tableIdentifier = createTableOperation.getTableIdentifier(); + Catalog catalog = getCatalogManager().getCatalogOrThrowException(tableIdentifier.getCatalogName()); + ResolvedCatalogTable catalogTable = + getCatalogManager().resolveCatalogTable(createTableOperation.getCatalogTable()); + Optional stagingDynamicTableSink = + getSupportsStagingDynamicTableSink(createTableOperation, catalog, catalogTable); + if (stagingDynamicTableSink.isPresent()) { + // use atomic ctas + DynamicTableSink dynamicTableSink = stagingDynamicTableSink.get(); + SupportsStaging.StagingPurpose stagingPurpose = createTableOperation.isIgnoreIfExists() + ? SupportsStaging.StagingPurpose.CREATE_TABLE_AS_IF_NOT_EXISTS + : SupportsStaging.StagingPurpose.CREATE_TABLE_AS; + StagedTable stagedTable = + ((SupportsStaging) dynamicTableSink).applyStaging(new SinkStagingContext(stagingPurpose)); + StagingSinkJobStatusHook stagingSinkJobStatusHook = new StagingSinkJobStatusHook(stagedTable); + return ctasOperation.toStagedSinkModifyOperation(tableIdentifier, catalogTable, catalog, dynamicTableSink); + } + // use non-atomic ctas, create table first + executeInternal(createTableOperation); + return ctasOperation.toSinkModifyOperation(getCatalogManager()); + } + + private ModifyOperation getModifyOperation(ReplaceTableAsOperation rtasOperation) { + CreateTableOperation createTableOperation = rtasOperation.getCreateTableOperation(); + ObjectIdentifier tableIdentifier = createTableOperation.getTableIdentifier(); + // First check if the replacedTable exists + Optional replacedTable = getCatalogManager().getTable(tableIdentifier); + if (!rtasOperation.isCreateOrReplace() && !replacedTable.isPresent()) { + throw new TableException(String.format( + "The table %s to be replaced doesn't exist. 
" + + "You can try to use CREATE TABLE AS statement or " + + "CREATE OR REPLACE TABLE AS statement.", + tableIdentifier)); + } + Catalog catalog = getCatalogManager().getCatalogOrThrowException(tableIdentifier.getCatalogName()); + ResolvedCatalogTable catalogTable = + getCatalogManager().resolveCatalogTable(createTableOperation.getCatalogTable()); + Optional stagingDynamicTableSink = + getSupportsStagingDynamicTableSink(createTableOperation, catalog, catalogTable); + if (stagingDynamicTableSink.isPresent()) { + // use atomic rtas + DynamicTableSink dynamicTableSink = stagingDynamicTableSink.get(); + SupportsStaging.StagingPurpose stagingPurpose = rtasOperation.isCreateOrReplace() + ? SupportsStaging.StagingPurpose.CREATE_OR_REPLACE_TABLE_AS + : SupportsStaging.StagingPurpose.REPLACE_TABLE_AS; + + StagedTable stagedTable = + ((SupportsStaging) dynamicTableSink).applyStaging(new SinkStagingContext(stagingPurpose)); + StagingSinkJobStatusHook stagingSinkJobStatusHook = new StagingSinkJobStatusHook(stagedTable); + return rtasOperation.toStagedSinkModifyOperation(tableIdentifier, catalogTable, catalog, dynamicTableSink); + } + // non-atomic rtas drop table first if exists, then create + if (replacedTable.isPresent()) { + getCatalogManager().dropTable(tableIdentifier, false); + } + executeInternal(createTableOperation); + return rtasOperation.toSinkModifyOperation(getCatalogManager()); + } + + private Optional getSupportsStagingDynamicTableSink( + CreateTableOperation createTableOperation, Catalog catalog, ResolvedCatalogTable catalogTable) { + TableConfig tableConfig = getTableEnvironment().getConfig(); + boolean isStreamingMode = true; + RuntimeExecutionMode runtimeExecutionMode = + getStreamExecutionEnvironment().getConfiguration().get(ExecutionOptions.RUNTIME_MODE); + if (RuntimeExecutionMode.BATCH.equals(runtimeExecutionMode)) { + isStreamingMode = false; + } + if (tableConfig.get(TableConfigOptions.TABLE_RTAS_CTAS_ATOMICITY_ENABLED)) { + if (!TableFactoryUtil.isLegacyConnectorOptions( + catalog, + tableConfig, + isStreamingMode, + createTableOperation.getTableIdentifier(), + catalogTable, + createTableOperation.isTemporary())) { + try { + DynamicTableSink dynamicTableSink = ExecutableOperationUtils.createDynamicTableSink( + catalog, + () -> (new ModuleManager()).getFactory((Module::getTableSinkFactory)), + createTableOperation.getTableIdentifier(), + catalogTable, + Collections.emptyMap(), + tableConfig, + getUserClassLoader(), + createTableOperation.isTemporary()); + if (dynamicTableSink instanceof SupportsStaging) { + return Optional.of(dynamicTableSink); + } + } catch (Exception e) { + throw new TableException( + String.format( + "Fail to create DynamicTableSink for the table %s, " + + "maybe the table does not support atomicity of CTAS/RTAS, " + + "please set %s to false and try again.", + createTableOperation.getTableIdentifier(), + TableConfigOptions.TABLE_RTAS_CTAS_ATOMICITY_ENABLED.key()), + e); + } + } + } + return Optional.empty(); + } + + public void addOperator(Transformation transformation) { + getStreamExecutionEnvironment().addOperator(transformation); + } + + public void clearModifyOperations() { + modifyOperations.clear(); + } + @Override public ObjectNode getStreamGraph(String statement) { List operations = super.getParser().parse(statement); @@ -122,6 +280,10 @@ private StreamGraph transOperatoinsToStreamGraph(List modifyOpe return streamGraph; } + public List> transOperatoinsToTransformation(List modifyOperations) { + return getPlanner().translate(modifyOperations); + } 
+ @Override public JobPlanInfo getJobPlanInfo(List statements) { return new JobPlanInfo(JsonPlanGenerator.generatePlan(getJobGraphFromInserts(statements))); @@ -145,8 +307,111 @@ public StreamGraph getStreamGraphFromInserts(List statements) { return transOperatoinsToStreamGraph(modifyOperations); } + public Operation getOperationFromStatement(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + return operations.get(0); + } + + public ModifyOperation getModifyOperationFromInsert(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + return (ModifyOperation) operation; + } else if (operation instanceof QueryOperation) { + log.info("Select statement is skipped."); + return null; + } else { + throw new TableException("Only insert or select statement is supported now."); + } + } + + public StreamGraph getStreamGraph() { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + return transOperatoinsToStreamGraph(modifyOperations); + } + @Override public void createCatalog(String catalogName, CatalogDescriptor catalogDescriptor) { getCatalogManager().createCatalog(catalogName, catalogDescriptor); } + + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { + List operations = getParser().parse(statement); + if (operations.size() != 1) { + throw new DinkyException("Unsupported SQL explain! explainSql() only accepts a single SQL."); + } + SqlExplainResult record = new SqlExplainResult(); + if (operations.isEmpty()) { + throw new DinkyException("No statement is explained."); + } + record.setParseTrue(true); + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + if (operation instanceof ReplaceTableAsOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("RTAS"); + } else if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else if (operation instanceof ExplainOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("Explain"); + } else if (operation instanceof QueryOperation) { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DQL"); + } else { + record.setExplain(operation.asSummaryString()); + record.setType("DDL"); + } + record.setExplainTrue(true); + return record; + } + + public SqlExplainResult explainModifyOperations( + List modifyOperations, ExplainDetail... 
extraDetails) { + SqlExplainResult record = new SqlExplainResult(); + if (modifyOperations.isEmpty()) { + throw new DinkyException("No modify operation is explained."); + } + record.setParseTrue(true); + if (modifyOperations.size() == 1) { + Operation operation = modifyOperations.get(0); + if (operation instanceof ReplaceTableAsOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("RTAS"); + } else if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("Statement Set"); + } + record.setExplainTrue(true); + return record; + } } diff --git a/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java b/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java index 6414271715..cf1724512f 100644 --- a/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java +++ b/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java @@ -20,22 +20,14 @@ package org.dinky.executor; import org.dinky.data.model.LineageRel; -import org.dinky.data.result.SqlExplainResult; import org.dinky.utils.LineageContext; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; -import org.apache.flink.table.api.ExplainDetail; -import org.apache.flink.table.api.ExplainFormat; import org.apache.flink.table.api.TableEnvironment; -import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl; import org.apache.flink.table.delegation.Planner; -import org.apache.flink.table.operations.ExplainOperation; -import org.apache.flink.table.operations.ModifyOperation; -import org.apache.flink.table.operations.Operation; -import org.apache.flink.table.operations.QueryOperation; import java.util.List; @@ -85,36 +77,6 @@ public Configuration getRootConfiguration() { return (Configuration) this.getConfig().getRootConfiguration(); } - @Override - public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { - List operations = getParser().parse(statement); - if (operations.size() != 1) { - throw new TableException("Unsupported SQL query! 
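Implementation note: both explainModifyOperations variants wrap the list in new ArrayList<>(...) before calling the planner. The copy is not cosmetic, as this minimal illustration shows:

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.table.operations.ModifyOperation;
import org.apache.flink.table.operations.Operation;

final class ListWideningSketch {
    // Java generics are invariant: a List<ModifyOperation> is not a
    // List<Operation>, so the elements are re-wrapped before Planner#explain.
    static List<Operation> widen(List<ModifyOperation> ops) {
        return new ArrayList<>(ops);
    }
}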
explainSql() only accepts a single SQL query."); - } - - Operation operation = operations.get(0); - SqlExplainResult data = new SqlExplainResult(); - data.setParseTrue(true); - data.setExplainTrue(true); - - if (operation instanceof ModifyOperation) { - data.setType("Modify DML"); - } else if (operation instanceof ExplainOperation) { - data.setType("Explain DML"); - } else if (operation instanceof QueryOperation) { - data.setType("Query DML"); - } else { - data.setExplain(operation.asSummaryString()); - data.setType("DDL"); - - // data.setExplain("DDL statement needn't comment。"); - return data; - } - - data.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); - return data; - } - @Override public List getLineage(String statement) { LineageContext lineageContext = new LineageContext(this); diff --git a/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java b/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java index 74c58c0d64..613607073b 100644 --- a/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java +++ b/dinky-client/dinky-client-1.19/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java @@ -19,10 +19,14 @@ package org.dinky.executor; +import org.dinky.data.exception.DinkyException; +import org.dinky.data.result.SqlExplainResult; import org.dinky.operations.CustomNewParserImpl; +import org.apache.flink.api.common.RuntimeExecutionMode; import org.apache.flink.api.dag.Transformation; import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.ExecutionOptions; import org.apache.flink.configuration.PipelineOptions; import org.apache.flink.runtime.jobgraph.jsonplan.JsonPlanGenerator; import org.apache.flink.runtime.rest.messages.JobPlanInfo; @@ -30,16 +34,40 @@ import org.apache.flink.streaming.api.graph.JSONGenerator; import org.apache.flink.streaming.api.graph.StreamGraph; import org.apache.flink.table.api.EnvironmentSettings; +import org.apache.flink.table.api.ExplainDetail; +import org.apache.flink.table.api.ExplainFormat; +import org.apache.flink.table.api.TableConfig; import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.config.TableConfigOptions; import org.apache.flink.table.api.internal.CachedPlan; import org.apache.flink.table.api.internal.TableResultInternal; +import org.apache.flink.table.catalog.Catalog; import org.apache.flink.table.catalog.CatalogDescriptor; +import org.apache.flink.table.catalog.ContextResolvedTable; +import org.apache.flink.table.catalog.ObjectIdentifier; +import org.apache.flink.table.catalog.ResolvedCatalogTable; +import org.apache.flink.table.catalog.StagedTable; +import org.apache.flink.table.connector.sink.DynamicTableSink; +import org.apache.flink.table.connector.sink.SinkStagingContext; +import org.apache.flink.table.connector.sink.abilities.SupportsStaging; +import org.apache.flink.table.execution.StagingSinkJobStatusHook; +import org.apache.flink.table.factories.TableFactoryUtil; +import org.apache.flink.table.module.Module; +import org.apache.flink.table.module.ModuleManager; +import org.apache.flink.table.operations.CreateTableASOperation; +import org.apache.flink.table.operations.ExplainOperation; import org.apache.flink.table.operations.ModifyOperation; import org.apache.flink.table.operations.Operation; +import 
org.apache.flink.table.operations.QueryOperation; +import org.apache.flink.table.operations.ReplaceTableAsOperation; +import org.apache.flink.table.operations.ddl.CreateTableOperation; +import org.apache.flink.table.operations.utils.ExecutableOperationUtils; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; import org.slf4j.Logger; @@ -60,6 +88,8 @@ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { private static final ObjectMapper mapper = new ObjectMapper(); + private List modifyOperations = new ArrayList<>(); + public CustomTableEnvironmentImpl(StreamTableEnvironment streamTableEnvironment) { super(streamTableEnvironment); injectParser(new CustomNewParserImpl(this, getPlanner().getParser())); @@ -89,6 +119,138 @@ public static CustomTableEnvironmentImpl create( return new CustomTableEnvironmentImpl(streamTableEnvironment); } + public List getModifyOperations() { + return modifyOperations; + } + + public void addModifyOperations(ModifyOperation modifyOperation) { + if (modifyOperation instanceof CreateTableASOperation) { + modifyOperations.add(getModifyOperation((CreateTableASOperation) modifyOperation)); + } else if (modifyOperation instanceof ReplaceTableAsOperation) { + modifyOperations.add(getModifyOperation((ReplaceTableAsOperation) modifyOperation)); + } else { + modifyOperations.add(modifyOperation); + } + } + + private ModifyOperation getModifyOperation(CreateTableASOperation ctasOperation) { + CreateTableOperation createTableOperation = ctasOperation.getCreateTableOperation(); + ObjectIdentifier tableIdentifier = createTableOperation.getTableIdentifier(); + Catalog catalog = getCatalogManager().getCatalogOrThrowException(tableIdentifier.getCatalogName()); + ResolvedCatalogTable catalogTable = + getCatalogManager().resolveCatalogTable(createTableOperation.getCatalogTable()); + Optional stagingDynamicTableSink = + getSupportsStagingDynamicTableSink(createTableOperation, catalog, catalogTable); + if (stagingDynamicTableSink.isPresent()) { + // use atomic ctas + DynamicTableSink dynamicTableSink = stagingDynamicTableSink.get(); + SupportsStaging.StagingPurpose stagingPurpose = createTableOperation.isIgnoreIfExists() + ? SupportsStaging.StagingPurpose.CREATE_TABLE_AS_IF_NOT_EXISTS + : SupportsStaging.StagingPurpose.CREATE_TABLE_AS; + StagedTable stagedTable = + ((SupportsStaging) dynamicTableSink).applyStaging(new SinkStagingContext(stagingPurpose)); + StagingSinkJobStatusHook stagingSinkJobStatusHook = new StagingSinkJobStatusHook(stagedTable); + return ctasOperation.toStagedSinkModifyOperation(tableIdentifier, catalogTable, catalog, dynamicTableSink); + } + // use non-atomic ctas, create table first + executeInternal(createTableOperation); + return ctasOperation.toSinkModifyOperation(getCatalogManager()); + } + + private ModifyOperation getModifyOperation(ReplaceTableAsOperation rtasOperation) { + CreateTableOperation createTableOperation = rtasOperation.getCreateTableOperation(); + ObjectIdentifier tableIdentifier = createTableOperation.getTableIdentifier(); + // First check if the replacedTable exists + Optional replacedTable = getCatalogManager().getTable(tableIdentifier); + if (!rtasOperation.isCreateOrReplace() && !replacedTable.isPresent()) { + throw new TableException(String.format( + "The table %s to be replaced doesn't exist. 
" + + "You can try to use CREATE TABLE AS statement or " + + "CREATE OR REPLACE TABLE AS statement.", + tableIdentifier)); + } + Catalog catalog = getCatalogManager().getCatalogOrThrowException(tableIdentifier.getCatalogName()); + ResolvedCatalogTable catalogTable = + getCatalogManager().resolveCatalogTable(createTableOperation.getCatalogTable()); + Optional stagingDynamicTableSink = + getSupportsStagingDynamicTableSink(createTableOperation, catalog, catalogTable); + if (stagingDynamicTableSink.isPresent()) { + // use atomic rtas + DynamicTableSink dynamicTableSink = stagingDynamicTableSink.get(); + SupportsStaging.StagingPurpose stagingPurpose = rtasOperation.isCreateOrReplace() + ? SupportsStaging.StagingPurpose.CREATE_OR_REPLACE_TABLE_AS + : SupportsStaging.StagingPurpose.REPLACE_TABLE_AS; + + StagedTable stagedTable = + ((SupportsStaging) dynamicTableSink).applyStaging(new SinkStagingContext(stagingPurpose)); + StagingSinkJobStatusHook stagingSinkJobStatusHook = new StagingSinkJobStatusHook(stagedTable); + return rtasOperation.toStagedSinkModifyOperation(tableIdentifier, catalogTable, catalog, dynamicTableSink); + } + // non-atomic rtas drop table first if exists, then create + if (replacedTable.isPresent()) { + getCatalogManager().dropTable(tableIdentifier, false); + } + executeInternal(createTableOperation); + return rtasOperation.toSinkModifyOperation(getCatalogManager()); + } + + private Optional getSupportsStagingDynamicTableSink( + CreateTableOperation createTableOperation, Catalog catalog, ResolvedCatalogTable catalogTable) { + TableConfig tableConfig = getTableEnvironment().getConfig(); + boolean isStreamingMode = true; + RuntimeExecutionMode runtimeExecutionMode = + getStreamExecutionEnvironment().getConfiguration().get(ExecutionOptions.RUNTIME_MODE); + if (RuntimeExecutionMode.BATCH.equals(runtimeExecutionMode)) { + isStreamingMode = false; + } + if (tableConfig.get(TableConfigOptions.TABLE_RTAS_CTAS_ATOMICITY_ENABLED)) { + if (!TableFactoryUtil.isLegacyConnectorOptions( + catalog, + tableConfig, + isStreamingMode, + createTableOperation.getTableIdentifier(), + catalogTable, + createTableOperation.isTemporary())) { + try { + DynamicTableSink dynamicTableSink = ExecutableOperationUtils.createDynamicTableSink( + catalog, + () -> (new ModuleManager()).getFactory((Module::getTableSinkFactory)), + createTableOperation.getTableIdentifier(), + catalogTable, + Collections.emptyMap(), + tableConfig, + getUserClassLoader(), + createTableOperation.isTemporary()); + if (dynamicTableSink instanceof SupportsStaging) { + return Optional.of(dynamicTableSink); + } + } catch (Exception e) { + throw new TableException( + String.format( + "Fail to create DynamicTableSink for the table %s, " + + "maybe the table does not support atomicity of CTAS/RTAS, " + + "please set %s to false and try again.", + createTableOperation.getTableIdentifier(), + TableConfigOptions.TABLE_RTAS_CTAS_ATOMICITY_ENABLED.key()), + e); + } + } + } + return Optional.empty(); + } + + public void addOperator(Transformation transformation) { + getStreamExecutionEnvironment().addOperator(transformation); + } + + public void clearModifyOperations() { + modifyOperations.clear(); + } + + public List> transOperatoinsToTransformation(List modifyOperations) { + return getPlanner().translate(modifyOperations); + } + @Override public ObjectNode getStreamGraph(String statement) { List operations = super.getParser().parse(statement); @@ -147,6 +309,44 @@ public StreamGraph getStreamGraphFromInserts(List statements) { return 
transOperatoinsToStreamGraph(modifyOperations); } + public Operation getOperationFromStatement(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + return operations.get(0); + } + + public ModifyOperation getModifyOperationFromInsert(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + return (ModifyOperation) operation; + } else if (operation instanceof QueryOperation) { + log.info("Select statement is skipped."); + return null; + } else { + throw new TableException("Only insert or select statement is supported now."); + } + } + + public StreamGraph getStreamGraph() { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + return transOperatoinsToStreamGraph(modifyOperations); + } + @Override public void createCatalog(String catalogName, CatalogDescriptor catalogDescriptor) { getCatalogManager().createCatalog(catalogName, catalogDescriptor); @@ -154,7 +354,71 @@ public void createCatalog(String catalogName, CatalogDescriptor catalogDescripto @Override public TableResultInternal executeCachedPlanInternal(CachedPlan cachedPlan) { - return null; } + + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { + List operations = getParser().parse(statement); + if (operations.size() != 1) { + throw new DinkyException("Unsupported SQL explain! explainSql() only accepts a single SQL."); + } + SqlExplainResult record = new SqlExplainResult(); + if (operations.isEmpty()) { + throw new DinkyException("No statement is explained."); + } + record.setParseTrue(true); + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + if (operation instanceof ReplaceTableAsOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("RTAS"); + } else if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else if (operation instanceof ExplainOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("Explain"); + } else if (operation instanceof QueryOperation) { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DQL"); + } else { + record.setExplain(operation.asSummaryString()); + record.setType("DDL"); + } + record.setExplainTrue(true); + return record; + } + + public SqlExplainResult explainModifyOperations( + List modifyOperations, ExplainDetail... 
extraDetails) { + SqlExplainResult record = new SqlExplainResult(); + if (modifyOperations.isEmpty()) { + throw new DinkyException("No modify operation is explained."); + } + record.setParseTrue(true); + if (modifyOperations.size() == 1) { + Operation operation = modifyOperations.get(0); + if (operation instanceof ReplaceTableAsOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("RTAS"); + } else if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("Statement Set"); + } + record.setExplainTrue(true); + return record; + } } diff --git a/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java b/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java index 6414271715..cf1724512f 100644 --- a/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java +++ b/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/AbstractCustomTableEnvironment.java @@ -20,22 +20,14 @@ package org.dinky.executor; import org.dinky.data.model.LineageRel; -import org.dinky.data.result.SqlExplainResult; import org.dinky.utils.LineageContext; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; -import org.apache.flink.table.api.ExplainDetail; -import org.apache.flink.table.api.ExplainFormat; import org.apache.flink.table.api.TableEnvironment; -import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl; import org.apache.flink.table.delegation.Planner; -import org.apache.flink.table.operations.ExplainOperation; -import org.apache.flink.table.operations.ModifyOperation; -import org.apache.flink.table.operations.Operation; -import org.apache.flink.table.operations.QueryOperation; import java.util.List; @@ -85,36 +77,6 @@ public Configuration getRootConfiguration() { return (Configuration) this.getConfig().getRootConfiguration(); } - @Override - public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { - List operations = getParser().parse(statement); - if (operations.size() != 1) { - throw new TableException("Unsupported SQL query! 
explainSql() only accepts a single SQL query."); - } - - Operation operation = operations.get(0); - SqlExplainResult data = new SqlExplainResult(); - data.setParseTrue(true); - data.setExplainTrue(true); - - if (operation instanceof ModifyOperation) { - data.setType("Modify DML"); - } else if (operation instanceof ExplainOperation) { - data.setType("Explain DML"); - } else if (operation instanceof QueryOperation) { - data.setType("Query DML"); - } else { - data.setExplain(operation.asSummaryString()); - data.setType("DDL"); - - // data.setExplain("DDL statement needn't comment。"); - return data; - } - - data.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); - return data; - } - @Override public List getLineage(String statement) { LineageContext lineageContext = new LineageContext(this); diff --git a/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java b/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java index 74c58c0d64..2d16791b00 100644 --- a/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java +++ b/dinky-client/dinky-client-1.20/src/main/java/org/dinky/executor/CustomTableEnvironmentImpl.java @@ -19,10 +19,14 @@ package org.dinky.executor; +import org.dinky.data.exception.DinkyException; +import org.dinky.data.result.SqlExplainResult; import org.dinky.operations.CustomNewParserImpl; +import org.apache.flink.api.common.RuntimeExecutionMode; import org.apache.flink.api.dag.Transformation; import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.ExecutionOptions; import org.apache.flink.configuration.PipelineOptions; import org.apache.flink.runtime.jobgraph.jsonplan.JsonPlanGenerator; import org.apache.flink.runtime.rest.messages.JobPlanInfo; @@ -30,16 +34,40 @@ import org.apache.flink.streaming.api.graph.JSONGenerator; import org.apache.flink.streaming.api.graph.StreamGraph; import org.apache.flink.table.api.EnvironmentSettings; +import org.apache.flink.table.api.ExplainDetail; +import org.apache.flink.table.api.ExplainFormat; +import org.apache.flink.table.api.TableConfig; import org.apache.flink.table.api.TableException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.config.TableConfigOptions; import org.apache.flink.table.api.internal.CachedPlan; import org.apache.flink.table.api.internal.TableResultInternal; +import org.apache.flink.table.catalog.Catalog; import org.apache.flink.table.catalog.CatalogDescriptor; +import org.apache.flink.table.catalog.ContextResolvedTable; +import org.apache.flink.table.catalog.ObjectIdentifier; +import org.apache.flink.table.catalog.ResolvedCatalogTable; +import org.apache.flink.table.catalog.StagedTable; +import org.apache.flink.table.connector.sink.DynamicTableSink; +import org.apache.flink.table.connector.sink.SinkStagingContext; +import org.apache.flink.table.connector.sink.abilities.SupportsStaging; +import org.apache.flink.table.execution.StagingSinkJobStatusHook; +import org.apache.flink.table.factories.TableFactoryUtil; +import org.apache.flink.table.module.Module; +import org.apache.flink.table.module.ModuleManager; +import org.apache.flink.table.operations.CreateTableASOperation; +import org.apache.flink.table.operations.ExplainOperation; import org.apache.flink.table.operations.ModifyOperation; import org.apache.flink.table.operations.Operation; +import 
org.apache.flink.table.operations.QueryOperation; +import org.apache.flink.table.operations.ReplaceTableAsOperation; +import org.apache.flink.table.operations.ddl.CreateTableOperation; +import org.apache.flink.table.operations.utils.ExecutableOperationUtils; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; import org.slf4j.Logger; @@ -60,6 +88,8 @@ public class CustomTableEnvironmentImpl extends AbstractCustomTableEnvironment { private static final ObjectMapper mapper = new ObjectMapper(); + private List modifyOperations = new ArrayList<>(); + public CustomTableEnvironmentImpl(StreamTableEnvironment streamTableEnvironment) { super(streamTableEnvironment); injectParser(new CustomNewParserImpl(this, getPlanner().getParser())); @@ -89,6 +119,138 @@ public static CustomTableEnvironmentImpl create( return new CustomTableEnvironmentImpl(streamTableEnvironment); } + public List getModifyOperations() { + return modifyOperations; + } + + public void addModifyOperations(ModifyOperation modifyOperation) { + if (modifyOperation instanceof CreateTableASOperation) { + modifyOperations.add(getModifyOperation((CreateTableASOperation) modifyOperation)); + } else if (modifyOperation instanceof ReplaceTableAsOperation) { + modifyOperations.add(getModifyOperation((ReplaceTableAsOperation) modifyOperation)); + } else { + modifyOperations.add(modifyOperation); + } + } + + private ModifyOperation getModifyOperation(CreateTableASOperation ctasOperation) { + CreateTableOperation createTableOperation = ctasOperation.getCreateTableOperation(); + ObjectIdentifier tableIdentifier = createTableOperation.getTableIdentifier(); + Catalog catalog = getCatalogManager().getCatalogOrThrowException(tableIdentifier.getCatalogName()); + ResolvedCatalogTable catalogTable = + getCatalogManager().resolveCatalogTable(createTableOperation.getCatalogTable()); + Optional stagingDynamicTableSink = + getSupportsStagingDynamicTableSink(createTableOperation, catalog, catalogTable); + if (stagingDynamicTableSink.isPresent()) { + // use atomic ctas + DynamicTableSink dynamicTableSink = stagingDynamicTableSink.get(); + SupportsStaging.StagingPurpose stagingPurpose = createTableOperation.isIgnoreIfExists() + ? SupportsStaging.StagingPurpose.CREATE_TABLE_AS_IF_NOT_EXISTS + : SupportsStaging.StagingPurpose.CREATE_TABLE_AS; + StagedTable stagedTable = + ((SupportsStaging) dynamicTableSink).applyStaging(new SinkStagingContext(stagingPurpose)); + StagingSinkJobStatusHook stagingSinkJobStatusHook = new StagingSinkJobStatusHook(stagedTable); + return ctasOperation.toStagedSinkModifyOperation(tableIdentifier, catalogTable, catalog, dynamicTableSink); + } + // use non-atomic ctas, create table first + executeInternal(createTableOperation); + return ctasOperation.toSinkModifyOperation(getCatalogManager()); + } + + private ModifyOperation getModifyOperation(ReplaceTableAsOperation rtasOperation) { + CreateTableOperation createTableOperation = rtasOperation.getCreateTableOperation(); + ObjectIdentifier tableIdentifier = createTableOperation.getTableIdentifier(); + // First check if the replacedTable exists + Optional replacedTable = getCatalogManager().getTable(tableIdentifier); + if (!rtasOperation.isCreateOrReplace() && !replacedTable.isPresent()) { + throw new TableException(String.format( + "The table %s to be replaced doesn't exist. 
" + + "You can try to use CREATE TABLE AS statement or " + + "CREATE OR REPLACE TABLE AS statement.", + tableIdentifier)); + } + Catalog catalog = getCatalogManager().getCatalogOrThrowException(tableIdentifier.getCatalogName()); + ResolvedCatalogTable catalogTable = + getCatalogManager().resolveCatalogTable(createTableOperation.getCatalogTable()); + Optional stagingDynamicTableSink = + getSupportsStagingDynamicTableSink(createTableOperation, catalog, catalogTable); + if (stagingDynamicTableSink.isPresent()) { + // use atomic rtas + DynamicTableSink dynamicTableSink = stagingDynamicTableSink.get(); + SupportsStaging.StagingPurpose stagingPurpose = rtasOperation.isCreateOrReplace() + ? SupportsStaging.StagingPurpose.CREATE_OR_REPLACE_TABLE_AS + : SupportsStaging.StagingPurpose.REPLACE_TABLE_AS; + + StagedTable stagedTable = + ((SupportsStaging) dynamicTableSink).applyStaging(new SinkStagingContext(stagingPurpose)); + StagingSinkJobStatusHook stagingSinkJobStatusHook = new StagingSinkJobStatusHook(stagedTable); + return rtasOperation.toStagedSinkModifyOperation(tableIdentifier, catalogTable, catalog, dynamicTableSink); + } + // non-atomic rtas drop table first if exists, then create + if (replacedTable.isPresent()) { + getCatalogManager().dropTable(tableIdentifier, false); + } + executeInternal(createTableOperation); + return rtasOperation.toSinkModifyOperation(getCatalogManager()); + } + + private Optional getSupportsStagingDynamicTableSink( + CreateTableOperation createTableOperation, Catalog catalog, ResolvedCatalogTable catalogTable) { + TableConfig tableConfig = getTableEnvironment().getConfig(); + boolean isStreamingMode = true; + RuntimeExecutionMode runtimeExecutionMode = + getStreamExecutionEnvironment().getConfiguration().get(ExecutionOptions.RUNTIME_MODE); + if (RuntimeExecutionMode.BATCH.equals(runtimeExecutionMode)) { + isStreamingMode = false; + } + if (tableConfig.get(TableConfigOptions.TABLE_RTAS_CTAS_ATOMICITY_ENABLED)) { + if (!TableFactoryUtil.isLegacyConnectorOptions( + catalog, + tableConfig, + isStreamingMode, + createTableOperation.getTableIdentifier(), + catalogTable, + createTableOperation.isTemporary())) { + try { + DynamicTableSink dynamicTableSink = ExecutableOperationUtils.createDynamicTableSink( + catalog, + () -> (new ModuleManager()).getFactory((Module::getTableSinkFactory)), + createTableOperation.getTableIdentifier(), + catalogTable, + Collections.emptyMap(), + tableConfig, + getUserClassLoader(), + createTableOperation.isTemporary()); + if (dynamicTableSink instanceof SupportsStaging) { + return Optional.of(dynamicTableSink); + } + } catch (Exception e) { + throw new TableException( + String.format( + "Fail to create DynamicTableSink for the table %s, " + + "maybe the table does not support atomicity of CTAS/RTAS, " + + "please set %s to false and try again.", + createTableOperation.getTableIdentifier(), + TableConfigOptions.TABLE_RTAS_CTAS_ATOMICITY_ENABLED.key()), + e); + } + } + } + return Optional.empty(); + } + + public void addOperator(Transformation transformation) { + getStreamExecutionEnvironment().addOperator(transformation); + } + + public void clearModifyOperations() { + modifyOperations.clear(); + } + + public List> transOperatoinsToTransformation(List modifyOperations) { + return getPlanner().translate(modifyOperations); + } + @Override public ObjectNode getStreamGraph(String statement) { List operations = super.getParser().parse(statement); @@ -147,6 +309,43 @@ public StreamGraph getStreamGraphFromInserts(List statements) { return 
transOperatoinsToStreamGraph(modifyOperations); } + public Operation getOperationFromStatement(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + return operations.get(0); + } + + public ModifyOperation getModifyOperationFromInsert(String statement) { + List operations = getParser().parse(statement); + if (operations.isEmpty()) { + throw new TableException("No statement is parsed."); + } + if (operations.size() > 1) { + throw new TableException("Only single statement is supported."); + } + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + return (ModifyOperation) operation; + } else if (operation instanceof QueryOperation) { + log.info("Select statement is skipped."); + return null; + } else { + throw new TableException("Only insert or select statement is supported now."); + } + } + + public StreamGraph getStreamGraph() { + return transOperatoinsToStreamGraph(modifyOperations); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + return transOperatoinsToStreamGraph(modifyOperations); + } @Override public void createCatalog(String catalogName, CatalogDescriptor catalogDescriptor) { getCatalogManager().createCatalog(catalogName, catalogDescriptor); @@ -154,7 +353,71 @@ public void createCatalog(String catalogName, CatalogDescriptor catalogDescripto @Override public TableResultInternal executeCachedPlanInternal(CachedPlan cachedPlan) { - return null; } + + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { + List operations = getParser().parse(statement); + if (operations.size() != 1) { + throw new DinkyException("Unsupported SQL explain! explainSql() only accepts a single SQL."); + } + SqlExplainResult record = new SqlExplainResult(); + if (operations.isEmpty()) { + throw new DinkyException("No statement is explained."); + } + record.setParseTrue(true); + Operation operation = operations.get(0); + if (operation instanceof ModifyOperation) { + if (operation instanceof ReplaceTableAsOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("RTAS"); + } else if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else if (operation instanceof ExplainOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("Explain"); + } else if (operation instanceof QueryOperation) { + record.setExplain(getPlanner().explain(operations, ExplainFormat.TEXT, extraDetails)); + record.setType("DQL"); + } else { + record.setExplain(operation.asSummaryString()); + record.setType("DDL"); + } + record.setExplainTrue(true); + return record; + } + + public SqlExplainResult explainModifyOperations( + List modifyOperations, ExplainDetail... 
extraDetails) { + SqlExplainResult record = new SqlExplainResult(); + if (modifyOperations.isEmpty()) { + throw new DinkyException("No modify operation is explained."); + } + record.setParseTrue(true); + if (modifyOperations.size() == 1) { + Operation operation = modifyOperations.get(0); + if (operation instanceof ReplaceTableAsOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("RTAS"); + } else if (operation instanceof CreateTableASOperation) { + record.setExplain(operation.asSummaryString()); + record.setType("CTAS"); + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("DML"); + } + } else { + record.setExplain( + getPlanner().explain(new ArrayList<>(modifyOperations), ExplainFormat.TEXT, extraDetails)); + record.setType("Statement Set"); + } + record.setExplainTrue(true); + return record; + } } diff --git a/dinky-client/dinky-client-base/src/main/java/org/dinky/executor/CustomTableEnvironment.java b/dinky-client/dinky-client-base/src/main/java/org/dinky/executor/CustomTableEnvironment.java index 794375f761..93bfd26db6 100644 --- a/dinky-client/dinky-client-base/src/main/java/org/dinky/executor/CustomTableEnvironment.java +++ b/dinky-client/dinky-client-base/src/main/java/org/dinky/executor/CustomTableEnvironment.java @@ -22,6 +22,7 @@ import org.dinky.data.model.LineageRel; import org.dinky.data.result.SqlExplainResult; +import org.apache.flink.api.dag.Transformation; import org.apache.flink.configuration.ConfigOption; import org.apache.flink.configuration.Configuration; import org.apache.flink.configuration.PipelineOptions; @@ -33,6 +34,8 @@ import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.apache.flink.table.api.internal.TableEnvironmentInternal; import org.apache.flink.table.delegation.Planner; +import org.apache.flink.table.operations.ModifyOperation; +import org.apache.flink.table.operations.Operation; import java.io.File; import java.net.URL; @@ -54,14 +57,34 @@ public interface CustomTableEnvironment extends StreamTableEnvironment, TableEnvironmentInternal, TableEnvironmentInstance { + void addModifyOperations(ModifyOperation modifyOperation); + + List getModifyOperations(); + + void addOperator(Transformation transformation); + + void clearModifyOperations(); + + List> transOperatoinsToTransformation(List modifyOperations); + ObjectNode getStreamGraph(String statement); + StreamGraph getStreamGraph(); + JobPlanInfo getJobPlanInfo(List statements); StreamGraph getStreamGraphFromInserts(List statements); + Operation getOperationFromStatement(String statement); + + ModifyOperation getModifyOperationFromInsert(String statement); + + StreamGraph getStreamGraphFromModifyOperations(List modifyOperations); + SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails); + SqlExplainResult explainModifyOperations(List modifyOperations, ExplainDetail... 
extraDetails); + StreamExecutionEnvironment getStreamExecutionEnvironment(); Planner getPlanner(); diff --git a/dinky-client/dinky-client-base/src/main/java/org/dinky/parser/SqlType.java b/dinky-client/dinky-client-base/src/main/java/org/dinky/parser/SqlType.java index 96cabd266c..37a988e28b 100644 --- a/dinky-client/dinky-client-base/src/main/java/org/dinky/parser/SqlType.java +++ b/dinky-client/dinky-client-base/src/main/java/org/dinky/parser/SqlType.java @@ -30,16 +30,24 @@ * @since 2021/7/3 11:11 */ public enum SqlType { - SELECT("SELECT", "^SELECT.*", SqlCategory.DQL), + SELECT("SELECT", "^\\(*\\s*SELECT.*", SqlCategory.DQL), CREATE("CREATE", "^CREATE(?!\\s+TABLE.*AS SELECT).*$", SqlCategory.DDL), + // REPLACE("REPLACE", "^REPLACE.*", SqlCategory.DML), + DROP("DROP", "^DROP.*", SqlCategory.DDL), + TRUNCATE("TRUNCATE ", "^TRUNCATE .*", SqlCategory.DDL), + ALTER("ALTER", "^ALTER.*", SqlCategory.DDL), INSERT("INSERT", "^INSERT.*", SqlCategory.DML), + UPDATE("UPDATE", "^UPDATE.*", SqlCategory.DML), + + DELETE("DELETE", "^DELETE.*", SqlCategory.DML), + DESC("DESC", "^DESC.*", SqlCategory.DDL), DESCRIBE("DESCRIBE", "^DESCRIBE.*", SqlCategory.DDL), @@ -50,6 +58,8 @@ public enum SqlType { SHOW("SHOW", "^SHOW.*", SqlCategory.DDL), + ANALYZE("ANALYZE ", "^ANALYZE.*", SqlCategory.DDL), + LOAD("LOAD", "^LOAD.*", SqlCategory.DDL), UNLOAD("UNLOAD", "^UNLOAD.*", SqlCategory.DDL), @@ -58,15 +68,23 @@ public enum SqlType { RESET("RESET", "^RESET.*", SqlCategory.DDL), - EXECUTE("EXECUTE", "^EXECUTE.*", SqlCategory.DQL), + EXECUTE("EXECUTE", "^EXECUTE.*", SqlCategory.DML), ADD_JAR("ADD_JAR", "^ADD\\s+JAR\\s+\\S+", SqlCategory.DDL), + ADD("ADD", "^ADD\\s+CUSTOMJAR\\s+\\S+", SqlCategory.DDL), + ADD_FILE("ADD_FILE", "^ADD\\s+FILE\\s+\\S+", SqlCategory.DDL), + REMOVE("REMOVE", "^REMOVE.*", SqlCategory.DDL), + + STOP("STOP", "^STOP.*", SqlCategory.DDL), + PRINT("PRINT", "^PRINT.*", SqlCategory.DQL), - CTAS("CTAS", "^CREATE\\s.*AS\\sSELECT.*$", SqlCategory.DDL), + CTAS("CTAS", "^CREATE\\s.*AS\\sSELECT.*$", SqlCategory.DML), + + RTAS("RTAS", "^REPLACE\\s.*AS\\sSELECT.*$", SqlCategory.DML), WITH("WITH", "^WITH.*", SqlCategory.DQL), @@ -77,7 +95,9 @@ public enum SqlType { private SqlCategory category; private static final List TRANS_SQL_TYPES = - Lists.newArrayList(INSERT, SELECT, WITH, SHOW, DESCRIBE, DESC, CTAS); + Lists.newArrayList(INSERT, SELECT, WITH, CTAS, RTAS, UPDATE, DELETE); + + private static final List CTAS_TYPES = Lists.newArrayList(CTAS, RTAS); SqlType(String type, String regrex, SqlCategory category) { this.type = type; @@ -104,4 +124,8 @@ public boolean match(String statement) { public static List getTransSqlTypes() { return TRANS_SQL_TYPES; } + + public static List getCtasTypes() { + return CTAS_TYPES; + } } diff --git a/dinky-common/src/main/java/org/dinky/data/model/SystemConfiguration.java b/dinky-common/src/main/java/org/dinky/data/model/SystemConfiguration.java index a82b233e36..539ca363a0 100644 --- a/dinky-common/src/main/java/org/dinky/data/model/SystemConfiguration.java +++ b/dinky-common/src/main/java/org/dinky/data/model/SystemConfiguration.java @@ -19,6 +19,7 @@ package org.dinky.data.model; +import org.dinky.assertion.Asserts; import org.dinky.context.EngineContextHolder; import org.dinky.data.constant.CommonConstant; import org.dinky.data.constant.DirConstant; @@ -387,7 +388,7 @@ public Map>> getAllConfiguration() { } public boolean isUseRestAPI() { - return useRestAPI.getValue(); + return Asserts.isNull(useRestAPI.getValue()) ? 
useRestAPI.getDefaultValue() : useRestAPI.getValue(); } public int getJobIdWait() { diff --git a/dinky-common/src/main/java/org/dinky/data/result/SqlExplainResult.java b/dinky-common/src/main/java/org/dinky/data/result/SqlExplainResult.java index b5cb123121..9ef0db33b4 100644 --- a/dinky-common/src/main/java/org/dinky/data/result/SqlExplainResult.java +++ b/dinky-common/src/main/java/org/dinky/data/result/SqlExplainResult.java @@ -21,11 +21,6 @@ import java.time.LocalDateTime; -/** - * 解释结果 - * - * @since 2021/6/7 22:06 - */ public class SqlExplainResult { private Integer index; @@ -36,6 +31,9 @@ public class SqlExplainResult { private String error; private boolean parseTrue; private boolean explainTrue; + + private boolean isSkipped; + private LocalDateTime explainTime; public SqlExplainResult() {} @@ -71,6 +69,7 @@ private SqlExplainResult(Builder builder) { setParseTrue(builder.parseTrue); setExplainTrue(builder.explainTrue); setExplainTime(builder.explainTime); + setSkipped(builder.isSkipped); } public static SqlExplainResult success(String type, String sql, String explain) { @@ -92,6 +91,7 @@ public static Builder newBuilder(SqlExplainResult copy) { builder.parseTrue = copy.isParseTrue(); builder.explainTrue = copy.isExplainTrue(); builder.explainTime = copy.getExplainTime(); + builder.isSkipped = copy.isSkipped(); return builder; } @@ -167,6 +167,14 @@ public void setExplainTime(LocalDateTime explainTime) { this.explainTime = explainTime; } + public boolean isSkipped() { + return isSkipped; + } + + public void setSkipped(boolean skipped) { + isSkipped = skipped; + } + @Override public String toString() { return String.format( @@ -184,6 +192,7 @@ public static final class Builder { private String error; private boolean parseTrue; private boolean explainTrue; + private boolean isSkipped = false; private LocalDateTime explainTime; private Builder() {} @@ -232,6 +241,11 @@ public Builder explainTrue(boolean val) { return this; } + public Builder isSkipped() { + isSkipped = true; + return this; + } + public Builder explainTime(LocalDateTime val) { explainTime = val; return this; diff --git a/dinky-core/src/main/java/com/ververica/cdc/composer/flink/FlinkPipelineExecution.java b/dinky-core/src/main/java/com/ververica/cdc/composer/flink/FlinkPipelineExecution.java deleted file mode 100644 index 52e5f04e4f..0000000000 --- a/dinky-core/src/main/java/com/ververica/cdc/composer/flink/FlinkPipelineExecution.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.flink.cdc.composer.flink; - -import org.apache.flink.cdc.composer.PipelineExecution; -import org.apache.flink.core.execution.JobClient; -import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; - -/** - * A pipeline execution that run the defined pipeline via Flink's {@link - * StreamExecutionEnvironment}. - */ -public class FlinkPipelineExecution implements PipelineExecution { - - private final StreamExecutionEnvironment env; - private final String jobName; - private final boolean isBlocking; - - public FlinkPipelineExecution(StreamExecutionEnvironment env, String jobName, boolean isBlocking) { - this.env = env; - this.jobName = jobName; - this.isBlocking = isBlocking; - } - - public StreamExecutionEnvironment getEnv() { - return env; - } - - public String getJobName() { - return jobName; - } - - public boolean isBlocking() { - return isBlocking; - } - - @Override - public ExecutionInfo execute() throws Exception { - JobClient jobClient = env.executeAsync(jobName); - if (isBlocking) { - jobClient.getJobExecutionResult().get(); - } - return new ExecutionInfo(jobClient.getJobID().toString(), jobName); - } -} diff --git a/dinky-core/src/main/java/org/dinky/executor/Executor.java b/dinky-core/src/main/java/org/dinky/executor/Executor.java index f9be626daf..98e09cc2dc 100644 --- a/dinky-core/src/main/java/org/dinky/executor/Executor.java +++ b/dinky-core/src/main/java/org/dinky/executor/Executor.java @@ -30,6 +30,7 @@ import org.apache.flink.api.common.ExecutionConfig; import org.apache.flink.api.common.JobExecutionResult; +import org.apache.flink.api.dag.Transformation; import org.apache.flink.configuration.Configuration; import org.apache.flink.configuration.PipelineOptions; import org.apache.flink.core.execution.JobClient; @@ -44,6 +45,10 @@ import org.apache.flink.table.api.StatementSet; import org.apache.flink.table.api.TableConfig; import org.apache.flink.table.api.TableResult; +import org.apache.flink.table.api.internal.TableResultInternal; +import org.apache.flink.table.catalog.CatalogManager; +import org.apache.flink.table.operations.ModifyOperation; +import org.apache.flink.table.operations.Operation; import java.io.File; import java.net.URL; @@ -209,6 +214,18 @@ public TableResult executeSql(String statement) { return tableEnvironment.executeSql(statement); } + public TableResultInternal executeModifyOperations() { + return tableEnvironment.executeInternal(tableEnvironment.getModifyOperations()); + } + + public void addOperatorFromModifyOperations() { + List> transformations = + transOperatoinsToTransformation(tableEnvironment.getModifyOperations()); + if (transformations != null) { + transformations.forEach(this::addOperator); + } + } + public void initUDF(String... udfFilePath) { List jarFiles = DinkyClassLoader.getJarFiles(udfFilePath, null); dinkyClassLoader.addURLs(jarFiles); @@ -238,6 +255,26 @@ public void addJar(File... 
jarPath) { addJar(Arrays.stream(jarPath).map(URLUtil::getURL).map(URL::toString).toArray(String[]::new)); } + public void addModifyOperations(ModifyOperation modifyOperation) { + tableEnvironment.addModifyOperations(modifyOperation); + } + + public void clearModifyOperations() { + tableEnvironment.clearModifyOperations(); + } + + public void addOperator(Transformation transformation) { + tableEnvironment.addOperator(transformation); + } + + public List> transOperatoinsToTransformation(List modifyOperations) { + return tableEnvironment.transOperatoinsToTransformation(modifyOperations); + } + + public CatalogManager getCatalogManager() { + return tableEnvironment.getCatalogManager(); + } + public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extraDetails) { statement = pretreatStatement(statement); if (Asserts.isNotNullString(statement) && !pretreatExecute(statement).isNoExecute()) { @@ -246,6 +283,10 @@ public SqlExplainResult explainSqlRecord(String statement, ExplainDetail... extr return null; } + public SqlExplainResult explainModifyOperations(ExplainDetail... extraDetails) { + return tableEnvironment.explainModifyOperations(tableEnvironment.getModifyOperations(), extraDetails); + } + public ObjectNode getStreamGraph(List statements) { StreamGraph streamGraph = tableEnvironment.getStreamGraphFromInserts(statements); return getStreamGraphJsonNode(streamGraph); @@ -269,18 +310,12 @@ public StreamGraph getStreamGraph() { return environment.getStreamGraph(); } - public ObjectNode getStreamGraphFromDataStream(List statements) { - statements.forEach(this::executeSql); - return getStreamGraphJsonNode(getStreamGraph()); + public StreamGraph getStreamGraphModifyOperations() { + return getStreamGraphFromModifyOperations(tableEnvironment.getModifyOperations()); } - public JobPlanInfo getJobPlanInfo(List statements) { - return tableEnvironment.getJobPlanInfo(statements); - } - - public JobPlanInfo getJobPlanInfoFromDataStream(List statements) { - statements.forEach(this::executeSql); - StreamGraph streamGraph = getStreamGraph(); + public JobPlanInfo getJobPlanInfo() { + StreamGraph streamGraph = getStreamGraphFromModifyOperations(tableEnvironment.getModifyOperations()); return new JobPlanInfo(JsonPlanGenerator.generatePlan(streamGraph.getJobGraph())); } @@ -288,16 +323,26 @@ public JobGraph getJobGraphFromInserts(List statements) { return tableEnvironment.getJobGraphFromInserts(statements); } + public ModifyOperation getModifyOperationFromInsert(String statement) { + return tableEnvironment.getModifyOperationFromInsert(statement); + } + + public Operation getOperationFromStatement(String statement) { + return tableEnvironment.getOperationFromStatement(statement); + } + + public StreamGraph getStreamGraphFromModifyOperations(List modifyOperations) { + return tableEnvironment.getStreamGraphFromModifyOperations(modifyOperations); + } + public TableResult executeStatementSet(List statements) { StatementSet statementSet = tableEnvironment.createStatementSet(); statements.forEach(statementSet::addInsertSql); return statementSet.execute(); } - public String explainStatementSet(List statements) { - StatementSet statementSet = tableEnvironment.createStatementSet(); - statements.forEach(statementSet::addInsertSql); - return statementSet.explain(); + public TableResult executeOperation(Operation operation) { + return tableEnvironment.executeInternal(operation); } public List getLineage(String statement) { diff --git a/dinky-core/src/main/java/org/dinky/explainer/Explainer.java 
b/dinky-core/src/main/java/org/dinky/explainer/Explainer.java index 1b266f4858..fa9cb5dfa9 100644 --- a/dinky-core/src/main/java/org/dinky/explainer/Explainer.java +++ b/dinky-core/src/main/java/org/dinky/explainer/Explainer.java @@ -25,54 +25,35 @@ import org.dinky.data.model.LineageRel; import org.dinky.data.result.ExplainResult; import org.dinky.data.result.SqlExplainResult; -import org.dinky.executor.CustomTableEnvironment; import org.dinky.executor.Executor; -import org.dinky.explainer.print_table.PrintStatementExplainer; import org.dinky.function.data.model.UDF; import org.dinky.function.util.UDFUtil; import org.dinky.interceptor.FlinkInterceptor; import org.dinky.job.JobConfig; import org.dinky.job.JobManager; import org.dinky.job.JobParam; -import org.dinky.job.StatementParam; +import org.dinky.job.JobRunnerFactory; +import org.dinky.job.JobStatement; +import org.dinky.job.JobStatementPlan; +import org.dinky.job.JobStatementType; import org.dinky.job.builder.JobUDFBuilder; import org.dinky.parser.SqlType; import org.dinky.trans.Operations; -import org.dinky.trans.ddl.CustomSetOperation; -import org.dinky.trans.dml.ExecuteJarOperation; -import org.dinky.trans.parse.AddFileSqlParseStrategy; -import org.dinky.trans.parse.AddJarSqlParseStrategy; -import org.dinky.trans.parse.ExecuteJarParseStrategy; -import org.dinky.trans.parse.SetSqlParseStrategy; import org.dinky.utils.DinkyClassLoaderUtil; -import org.dinky.utils.FlinkStreamEnvironmentUtil; -import org.dinky.utils.IpUtil; -import org.dinky.utils.LogUtil; import org.dinky.utils.SqlUtil; -import org.dinky.utils.URLUtils; -import org.apache.flink.api.dag.Pipeline; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.core.fs.FileSystem; import org.apache.flink.runtime.rest.messages.JobPlanInfo; +import org.apache.flink.streaming.api.graph.JSONGenerator; +import org.apache.flink.streaming.api.graph.StreamGraph; -import java.net.URL; -import java.time.LocalDateTime; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; +import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; -import com.google.common.collect.Sets; -import cn.hutool.core.collection.CollUtil; -import cn.hutool.core.text.StrBuilder; -import cn.hutool.core.text.StrFormatter; import cn.hutool.core.util.StrUtil; import lombok.extern.slf4j.Slf4j; @@ -112,87 +93,49 @@ public Explainer initialize(JobConfig config, String statement) { return this; } - public JobParam pretreatStatements(String[] statements) { - List ddl = new ArrayList<>(); - List trans = new ArrayList<>(); - List execute = new ArrayList<>(); - List statementList = new ArrayList<>(); - List udfList = new ArrayList<>(); - StrBuilder parsedSql = new StrBuilder(); + public JobStatementPlan parseStatements(String[] statements) { + JobStatementPlan jobStatementPlan = new JobStatementPlan(); - List statementsWithUdf = Arrays.stream(statements).collect(Collectors.toList()); + List udfStatements = new ArrayList<>(); Optional.ofNullable(jobManager.getConfig().getUdfRefer()) .ifPresent(t -> t.forEach((key, value) -> { String sql = String.format("create temporary function %s as '%s'", value, key); - statementsWithUdf.add(0, sql); + udfStatements.add(sql); })); + for (String udfStatement : udfStatements) { + jobStatementPlan.addJobStatementGenerated(udfStatement, 
JobStatementType.UDF, SqlType.CREATE);
+        }

-        List<SqlType> transSqlTypes = SqlType.getTransSqlTypes();
-        Set<SqlType> transSqlTypeSet = Sets.newHashSet(transSqlTypes);
-        for (String item : statementsWithUdf) {
+        for (String item : statements) {
             String statement = executor.pretreatStatement(item);
-            parsedSql.append(statement).append(";\n");
             if (statement.isEmpty()) {
                 continue;
             }
             SqlType operationType = Operations.getOperationType(statement);
-            if (operationType.equals(SqlType.SET) && SetSqlParseStrategy.INSTANCE.match(statement)) {
-                CustomSetOperation customSetOperation = new CustomSetOperation(statement);
-                customSetOperation.execute(this.executor.getCustomTableEnvironment());
+            if (operationType.equals(SqlType.SET) || operationType.equals(SqlType.RESET)) {
+                jobStatementPlan.addJobStatement(statement, JobStatementType.SET, operationType);
             } else if (operationType.equals(SqlType.ADD)) {
-                AddJarSqlParseStrategy.getAllFilePath(statement)
-                        .forEach(t -> jobManager.getUdfPathContextHolder().addOtherPlugins(t));
-                (executor.getDinkyClassLoader())
-                        .addURLs(URLUtils.getURLs(
-                                jobManager.getUdfPathContextHolder().getOtherPluginsFiles()));
+                jobStatementPlan.addJobStatement(statement, JobStatementType.ADD, operationType);
             } else if (operationType.equals(SqlType.ADD_FILE)) {
-                AddFileSqlParseStrategy.getAllFilePath(statement)
-                        .forEach(t -> jobManager.getUdfPathContextHolder().addFile(t));
-                (executor.getDinkyClassLoader())
-                        .addURLs(URLUtils.getURLs(
-                                jobManager.getUdfPathContextHolder().getFiles()));
+                jobStatementPlan.addJobStatement(statement, JobStatementType.ADD_FILE, operationType);
             } else if (operationType.equals(SqlType.ADD_JAR)) {
-                Configuration combinationConfig = getCombinationConfig();
-                FileSystem.initialize(combinationConfig, null);
-                ddl.add(new StatementParam(statement, operationType));
-                statementList.add(statement);
-            } else if (transSqlTypeSet.contains(operationType)) {
-                trans.add(new StatementParam(statement, operationType));
-                statementList.add(statement);
+                jobStatementPlan.addJobStatement(statement, JobStatementType.ADD_JAR, operationType);
+            } else if (SqlType.getTransSqlTypes().contains(operationType)) {
+                jobStatementPlan.addJobStatement(statement, JobStatementType.SQL, operationType);
                 if (!useStatementSet) {
                     break;
                 }
             } else if (operationType.equals(SqlType.EXECUTE)) {
-                execute.add(new StatementParam(statement, operationType));
+                jobStatementPlan.addJobStatement(statement, JobStatementType.EXECUTE, operationType);
             } else if (operationType.equals(SqlType.PRINT)) {
-                Map<String, String> config = this.executor.getExecutorConfig().getConfig();
-                String host = config.getOrDefault("dinky.dinkyHost", IpUtil.getHostIp());
-                int port = Integer.parseInt(config.getOrDefault("dinky.dinkyPrintPort", "7125"));
-                String[] tableNames = PrintStatementExplainer.getTableNames(statement);
-                for (String tableName : tableNames) {
-                    trans.add(new StatementParam(
-                            PrintStatementExplainer.getCreateStatement(tableName, host, port), SqlType.CTAS));
-                }
+                jobStatementPlan.addJobStatement(statement, JobStatementType.PRINT, operationType);
+            } else if (UDFUtil.isUdfStatement(statement)) {
+                jobStatementPlan.addJobStatement(statement, JobStatementType.UDF, operationType);
             } else {
-                UDF udf = UDFUtil.toUDF(statement, jobManager.getDinkyClassLoader());
-                if (Asserts.isNotNull(udf)) {
-                    udfList.add(udf);
-                }
-                ddl.add(new StatementParam(statement, operationType));
-                statementList.add(statement);
+                jobStatementPlan.addJobStatement(statement, JobStatementType.DDL, operationType);
             }
         }
-        return new JobParam(statementList, ddl,
trans, execute, CollUtil.removeNull(udfList), parsedSql.toString()); - } - - private Configuration getCombinationConfig() { - CustomTableEnvironment cte = executor.getCustomTableEnvironment(); - Configuration rootConfig = cte.getRootConfiguration(); - Configuration config = cte.getConfig().getConfiguration(); - Configuration combinationConfig = new Configuration(); - combinationConfig.addAll(rootConfig); - combinationConfig.addAll(config); - return combinationConfig; + return jobStatementPlan; } public List parseUDFFromStatements(String[] statements) { @@ -211,182 +154,74 @@ public List parseUDFFromStatements(String[] statements) { public ExplainResult explainSql(String statement) { log.info("Start explain FlinkSQL..."); - JobParam jobParam; + JobStatementPlan jobStatementPlan; List sqlExplainRecords = new ArrayList<>(); - int index = 1; boolean correct = true; try { - jobParam = pretreatStatements(SqlUtil.getStatements(statement)); + jobStatementPlan = parseStatements(SqlUtil.getStatements(statement)); + jobStatementPlan.buildFinalExecutableStatement(); } catch (Exception e) { SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); resultBuilder.error(e.getMessage()).parseTrue(false); sqlExplainRecords.add(resultBuilder.build()); - log.error("failed pretreatStatements:", e); + log.error("Failed parseStatements:", e); return new ExplainResult(false, sqlExplainRecords.size(), sqlExplainRecords); } - for (StatementParam item : jobParam.getDdl()) { - SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); - try { - SqlExplainResult recordResult = executor.explainSqlRecord(item.getValue()); - if (Asserts.isNull(recordResult)) { - continue; - } - resultBuilder = SqlExplainResult.newBuilder(recordResult); - executor.executeSql(item.getValue()); - } catch (Exception e) { - String error = StrFormatter.format( - "Exception in executing FlinkSQL:\n{}\n{}", - SqlUtil.addLineNumber(item.getValue()), - LogUtil.getError(e)); - resultBuilder - .error(error) - .explainTrue(false) - .explainTime(LocalDateTime.now()) - .sql(item.getValue()) - .index(index); - sqlExplainRecords.add(resultBuilder.build()); - correct = false; - log.error(error); - break; - } - resultBuilder - .explainTrue(true) - .explainTime(LocalDateTime.now()) - .sql(item.getValue()) - .index(index++); - sqlExplainRecords.add(resultBuilder.build()); - } - if (correct && !jobParam.getTrans().isEmpty()) { - if (useStatementSet) { - List inserts = new ArrayList<>(); - for (StatementParam item : jobParam.getTrans()) { - if (item.getType().equals(SqlType.INSERT) || item.getType().equals(SqlType.CTAS)) { - inserts.add(item.getValue()); - } - } - if (!inserts.isEmpty()) { - SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); - String sqlSet = String.join(";\r\n ", inserts); - try { - resultBuilder - .explain(executor.explainStatementSet(inserts)) - .parseTrue(true) - .explainTrue(true); - } catch (Exception e) { - String error = LogUtil.getError(e); - resultBuilder.error(error).parseTrue(false).explainTrue(false); - correct = false; - log.error(error); - } finally { - resultBuilder - .type("Modify DML") - .explainTime(LocalDateTime.now()) - .sql(sqlSet) - .index(index); - sqlExplainRecords.add(resultBuilder.build()); - } - } - } else { - for (StatementParam item : jobParam.getTrans()) { - SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); - - try { - resultBuilder = SqlExplainResult.newBuilder(executor.explainSqlRecord(item.getValue())); - 
resultBuilder.parseTrue(true).explainTrue(true); - } catch (Exception e) { - String error = StrFormatter.format( - "Exception in executing FlinkSQL:\n{}\n{}", - SqlUtil.addLineNumber(item.getValue()), - e.getMessage()); - resultBuilder.error(error).parseTrue(false).explainTrue(false); - correct = false; - log.error(error); - } finally { - resultBuilder - .type("Modify DML") - .explainTime(LocalDateTime.now()) - .sql(item.getValue()) - .index(index++); - sqlExplainRecords.add(resultBuilder.build()); - } - } + JobRunnerFactory jobRunnerFactory = JobRunnerFactory.create(jobManager); + for (JobStatement jobStatement : jobStatementPlan.getJobStatementList()) { + SqlExplainResult sqlExplainResult = jobRunnerFactory + .getJobRunner(jobStatement.getStatementType()) + .explain(jobStatement); + if (!sqlExplainResult.isSkipped()) { + sqlExplainRecords.add(sqlExplainResult); } } - for (StatementParam item : jobParam.getExecute()) { - SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); - - try { - SqlExplainResult sqlExplainResult = executor.explainSqlRecord(item.getValue()); - if (Asserts.isNull(sqlExplainResult)) { - sqlExplainResult = new SqlExplainResult(); - } else if (ExecuteJarParseStrategy.INSTANCE.match(item.getValue())) { - List allFileByAdd = jobManager.getAllFileSet(); - Pipeline pipeline = new ExecuteJarOperation(item.getValue()) - .explain(executor.getCustomTableEnvironment(), allFileByAdd); - sqlExplainResult.setExplain(FlinkStreamEnvironmentUtil.getStreamingPlanAsJSON(pipeline)); - } else { - executor.executeSql(item.getValue()); - } - resultBuilder = SqlExplainResult.newBuilder(sqlExplainResult); - resultBuilder.type("DATASTREAM").parseTrue(true); - } catch (Exception e) { - String error = StrFormatter.format( - "Exception in executing FlinkSQL:\n{}\n{}", - SqlUtil.addLineNumber(item.getValue()), - e.getMessage()); - resultBuilder - .error(error) - .explainTrue(false) - .explainTime(LocalDateTime.now()) - .sql(item.getValue()) - .index(index); - sqlExplainRecords.add(resultBuilder.build()); - correct = false; - log.error(error); - break; - } - resultBuilder - .explainTrue(true) - .explainTime(LocalDateTime.now()) - .sql(item.getValue()) - .index(index++); - sqlExplainRecords.add(resultBuilder.build()); - } log.info(StrUtil.format("A total of {} FlinkSQL have been Explained.", sqlExplainRecords.size())); return new ExplainResult(correct, sqlExplainRecords.size(), sqlExplainRecords); } public ObjectNode getStreamGraph(String statement) { - JobParam jobParam = pretreatStatements(SqlUtil.getStatements(statement)); - jobParam.getDdl().forEach(statementParam -> executor.executeSql(statementParam.getValue())); - - if (!jobParam.getTrans().isEmpty()) { - return executor.getStreamGraph(jobParam.getTransStatement()); - } - - if (!jobParam.getExecute().isEmpty()) { - List dataStreamPlans = - jobParam.getExecute().stream().map(StatementParam::getValue).collect(Collectors.toList()); - return executor.getStreamGraphFromDataStream(dataStreamPlans); + log.info("Start explain FlinkSQL..."); + JobStatementPlan jobStatementPlan = parseStatements(SqlUtil.getStatements(statement)); + jobStatementPlan.buildFinalExecutableStatement(); + log.info("Explain FlinkSQL successful"); + JobRunnerFactory jobRunnerFactory = JobRunnerFactory.create(jobManager); + for (JobStatement jobStatement : jobStatementPlan.getJobStatementList()) { + StreamGraph streamGraph = jobRunnerFactory + .getJobRunner(jobStatement.getStatementType()) + .getStreamGraph(jobStatement); + if 
(Asserts.isNotNull(streamGraph)) { + JSONGenerator jsonGenerator = new JSONGenerator(streamGraph); + String json = jsonGenerator.getJSON(); + ObjectMapper mapper = new ObjectMapper(); + ObjectNode objectNode = mapper.createObjectNode(); + try { + objectNode = (ObjectNode) mapper.readTree(json); + } catch (JsonProcessingException e) { + log.error("Get stream graph json node error.", e); + } + return objectNode; + } } - return mapper.createObjectNode(); + throw new DinkyException("No StreamGraph found."); } public JobPlanInfo getJobPlanInfo(String statement) { - JobParam jobParam = pretreatStatements(SqlUtil.getStatements(statement)); - jobParam.getDdl().forEach(statementParam -> executor.executeSql(statementParam.getValue())); - - if (!jobParam.getTrans().isEmpty()) { - return executor.getJobPlanInfo(jobParam.getTransStatement()); - } - - if (!jobParam.getExecute().isEmpty()) { - List dataStreamPlans = - jobParam.getExecute().stream().map(StatementParam::getValue).collect(Collectors.toList()); - return executor.getJobPlanInfoFromDataStream(dataStreamPlans); + log.info("Start explain FlinkSQL..."); + JobStatementPlan jobStatementPlan = parseStatements(SqlUtil.getStatements(statement)); + jobStatementPlan.buildFinalExecutableStatement(); + log.info("Explain FlinkSQL successful"); + JobRunnerFactory jobRunnerFactory = JobRunnerFactory.create(jobManager); + for (JobStatement jobStatement : jobStatementPlan.getJobStatementList()) { + JobPlanInfo jobPlanInfo = jobRunnerFactory + .getJobRunner(jobStatement.getStatementType()) + .getJobPlanInfo(jobStatement); + if (Asserts.isNotNull(jobPlanInfo)) { + return jobPlanInfo; + } } - throw new RuntimeException("Creating job plan fails because this job doesn't contain an insert statement."); + throw new DinkyException("No JobPlanInfo found."); } public List getLineage(String statement) { diff --git a/dinky-core/src/main/java/org/dinky/job/AbstractJobRunner.java b/dinky-core/src/main/java/org/dinky/job/AbstractJobRunner.java new file mode 100644 index 0000000000..e5c75c0e27 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/AbstractJobRunner.java @@ -0,0 +1,83 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package org.dinky.job;
+
+import org.dinky.data.result.SqlExplainResult;
+import org.dinky.utils.LogUtil;
+import org.dinky.utils.SqlUtil;
+
+import org.apache.flink.runtime.rest.messages.JobPlanInfo;
+import org.apache.flink.streaming.api.graph.StreamGraph;
+
+import java.time.LocalDateTime;
+
+import cn.hutool.core.text.StrFormatter;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public abstract class AbstractJobRunner implements JobRunner {
+
+    protected JobManager jobManager;
+
+    public SqlExplainResult explain(JobStatement jobStatement) {
+        SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder();
+        try {
+            run(jobStatement);
+            resultBuilder
+                    .parseTrue(true)
+                    .explainTrue(true)
+                    .type(jobStatement.getSqlType().getType())
+                    .sql(jobStatement.getStatement())
+                    .index(jobStatement.getIndex());
+        } catch (Exception e) {
+            String error = StrFormatter.format(
+                    "Exception in explaining FlinkSQL:\n{}\n{}",
+                    SqlUtil.addLineNumber(jobStatement.getStatement()),
+                    LogUtil.getError(e));
+            resultBuilder
+                    .parseTrue(false)
+                    .error(error)
+                    .explainTrue(false)
+                    .type(jobStatement.getSqlType().getType())
+                    .sql(jobStatement.getStatement())
+                    .index(jobStatement.getIndex());
+            log.error(error);
+        } finally {
+            resultBuilder.explainTime(LocalDateTime.now());
+            return resultBuilder.build();
+        }
+    }
+
+    public StreamGraph getStreamGraph(JobStatement jobStatement) {
+        explain(jobStatement);
+        if (jobStatement.isFinalExecutableStatement()) {
+            return jobManager.getExecutor().getStreamGraph();
+        }
+        return null;
+    }
+
+    public JobPlanInfo getJobPlanInfo(JobStatement jobStatement) {
+        explain(jobStatement);
+        if (jobStatement.isFinalExecutableStatement()) {
+            return jobManager.getExecutor().getJobPlanInfo();
+        }
+        return null;
+    }
+}
diff --git a/dinky-core/src/main/java/org/dinky/job/JobConfig.java b/dinky-core/src/main/java/org/dinky/job/JobConfig.java
index 146357da23..bcd39f221b 100644
--- a/dinky-core/src/main/java/org/dinky/job/JobConfig.java
+++ b/dinky-core/src/main/java/org/dinky/job/JobConfig.java
@@ -218,7 +218,7 @@ public ExecutorConfig getExecutorSetting() {
         Map<String, String> config = new HashMap<>(32);
         if (GatewayType.isDeployCluster(type) && gatewayConfig != null && gatewayConfig.getFlinkConfig() != null) {
             config.putAll(gatewayConfig.getFlinkConfig().getConfiguration());
-        } else {
+        } else if (Asserts.isNotNullMap(configJson)) {
             config.putAll(configJson);
         }
         return ExecutorConfig.build(
diff --git a/dinky-core/src/main/java/org/dinky/job/JobManager.java b/dinky-core/src/main/java/org/dinky/job/JobManager.java
index b49a2a0203..6935b207f5 100644
--- a/dinky-core/src/main/java/org/dinky/job/JobManager.java
+++ b/dinky-core/src/main/java/org/dinky/job/JobManager.java
@@ -50,11 +50,8 @@
 import org.dinky.gateway.result.GatewayResult;
 import org.dinky.gateway.result.SavePointResult;
 import org.dinky.gateway.result.TestResult;
-import org.dinky.job.builder.JobDDLBuilder;
-import org.dinky.job.builder.JobExecuteBuilder;
 import org.dinky.job.builder.JobJarStreamGraphBuilder;
-import org.dinky.job.builder.JobTransBuilder;
-import org.dinky.job.builder.JobUDFBuilder;
+import org.dinky.job.runner.JobJarRunner;
 import org.dinky.parser.SqlType;
 import org.dinky.trans.Operations;
 import org.dinky.trans.parse.AddFileSqlParseStrategy;
@@ -111,6 +108,7 @@ public class JobManager {
     private boolean useRestAPI = false;
     private GatewayType runMode = GatewayType.LOCAL;
     private JobParam jobParam = null;
+    private JobStatementPlan jobStatementPlan = null;
     private String currentSql = "";
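// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch). The executeSql()
// hunks that follow replace the fixed JobUDFBuilder -> JobDDLBuilder ->
// JobTransBuilder -> JobExecuteBuilder pipeline with a per-statement dispatch
// over JobRunner implementations. A minimal sketch of that dispatch loop,
// using only names introduced elsewhere in this diff; the wrapper class and
// its runAll method are hypothetical, added purely for illustration:
public final class JobRunnerDispatchSketch {

    private JobRunnerDispatchSketch() {}

    // Runs every statement of a parsed plan through the runner registered for
    // its JobStatementType (SET, ADD, ADD_FILE, ADD_JAR, UDF, DDL, SQL,
    // EXECUTE, PRINT), mirroring the loop added to JobManager.executeSql().
    public static void runAll(JobManager jobManager, JobStatementPlan plan) throws Exception {
        JobRunnerFactory jobRunnerFactory = JobRunnerFactory.create(jobManager);
        for (JobStatement jobStatement : plan.getJobStatementList()) {
            jobRunnerFactory.getJobRunner(jobStatement.getStatementType()).run(jobStatement);
        }
    }
}
// ---------------------------------------------------------------------------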
private final WeakReference<DinkyClassLoader> dinkyClassLoader = new WeakReference<>(DinkyClassLoader.build()); private Job job; @@ -171,7 +169,8 @@ public boolean isUseGateway() { // return dinkyclassloader public DinkyClassLoader getDinkyClassLoader() { - return dinkyClassLoader.get(); + DinkyClassLoader classLoader = dinkyClassLoader.get(); + return Asserts.isNotNull(classLoader) ? classLoader : DinkyClassLoader.build(); } // return udfPathContextHolder @@ -221,15 +219,15 @@ public void init() { } private boolean ready() { - return handler.init(job); + return isPlanMode || handler.init(job); } private boolean success() { - return handler.success(); + return isPlanMode || handler.success(); } private boolean failed() { - return handler.failed(); + return isPlanMode || handler.failed(); } public boolean close() { @@ -256,12 +254,16 @@ public JobResult executeJarSql(String statement) throws Exception { .map(t -> executor.pretreatStatement(t)) .collect(Collectors.toList()); statement = String.join(";\n", statements); - jobParam = - Explainer.build(executor, useStatementSet, this).pretreatStatements(SqlUtil.getStatements(statement)); + jobStatementPlan = - wait + jobStatementPlan =
 Explainer.build(executor, useStatementSet, this).parseStatements(SqlUtil.getStatements(statement)); job = Job.build(runMode, config, executorConfig, executor, statement, useGateway); ready(); try { - JobJarStreamGraphBuilder.build(this).run(); + JobJarRunner jobJarRunner = new JobJarRunner(this); + for (JobStatement jobStatement : jobStatementPlan.getJobStatementList()) { + jobJarRunner.run(jobStatement); + } + if (job.isFailed()) { failed(); } else { @@ -288,18 +290,15 @@ public JobResult executeSql(String statement) throws Exception { ready(); DinkyClassLoaderUtil.initClassLoader(config, getDinkyClassLoader()); - jobParam = - Explainer.build(executor, useStatementSet, this).pretreatStatements(SqlUtil.getStatements(statement)); + jobStatementPlan = + Explainer.build(executor, useStatementSet, this).parseStatements(SqlUtil.getStatements(statement)); try { - // step 1: init udf - JobUDFBuilder.build(this).run(); - // step 2: execute ddl - JobDDLBuilder.build(this).run(); - // step 3: execute insert/select/show/desc/CTAS...
- JobTransBuilder.build(this).run(); - // step 4: execute custom data stream task - JobExecuteBuilder.build(this).run(); - // finished + jobStatementPlan.buildFinalExecutableStatement(); + JobRunnerFactory jobRunnerFactory = JobRunnerFactory.create(this); + for (JobStatement jobStatement : jobStatementPlan.getJobStatementList()) { + jobRunnerFactory.getJobRunner(jobStatement.getStatementType()).run(jobStatement); + } + job.setEndTime(LocalDateTime.now()); if (job.isFailed()) { failed(); @@ -368,20 +367,15 @@ public static SelectResult getJobData(String jobId) { } public ExplainResult explainSql(String statement) { - return Explainer.build(executor, useStatementSet, this) - .initialize(config, statement) - .explainSql(statement); + return Explainer.build(executor, useStatementSet, this).explainSql(statement); } public ObjectNode getStreamGraph(String statement) { - return Explainer.build(executor, useStatementSet, this) - .initialize(config, statement) - .getStreamGraph(statement); + return Explainer.build(executor, useStatementSet, this).getStreamGraph(statement); } public String getJobPlanJson(String statement) { return Explainer.build(executor, useStatementSet, this) - .initialize(config, statement) .getJobPlanInfo(statement) .getJsonPlan(); } diff --git a/dinky-core/src/main/java/org/dinky/job/JobRunner.java b/dinky-core/src/main/java/org/dinky/job/JobRunner.java new file mode 100644 index 0000000000..faeac9becf --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/JobRunner.java @@ -0,0 +1,36 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.dinky.job; + +import org.dinky.data.result.SqlExplainResult; + +import org.apache.flink.runtime.rest.messages.JobPlanInfo; +import org.apache.flink.streaming.api.graph.StreamGraph; + +public interface JobRunner { + + void run(JobStatement jobStatement) throws Exception; + + SqlExplainResult explain(JobStatement jobStatement); + + StreamGraph getStreamGraph(JobStatement jobStatement); + + JobPlanInfo getJobPlanInfo(JobStatement jobStatement); +} diff --git a/dinky-core/src/main/java/org/dinky/job/JobRunnerFactory.java b/dinky-core/src/main/java/org/dinky/job/JobRunnerFactory.java new file mode 100644 index 0000000000..fe550a287c --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/JobRunnerFactory.java @@ -0,0 +1,75 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.dinky.job; + +import org.dinky.job.runner.JobAddRunner; +import org.dinky.job.runner.JobDDLRunner; +import org.dinky.job.runner.JobExecuteRunner; +import org.dinky.job.runner.JobPrintRunner; +import org.dinky.job.runner.JobSetRunner; +import org.dinky.job.runner.JobSqlRunner; +import org.dinky.job.runner.JobUDFRunner; + +public class JobRunnerFactory { + + private JobSetRunner jobSetRunner; + private JobAddRunner jobAddRunner; + private JobSqlRunner jobSqlRunner; + private JobExecuteRunner jobExecuteRunner; + private JobUDFRunner jobUDFRunner; + private JobPrintRunner jobPrintRunner; + private JobDDLRunner jobDDLRunner; + + public JobRunnerFactory(JobManager jobManager) { + this.jobSetRunner = new JobSetRunner(jobManager); + this.jobAddRunner = new JobAddRunner(jobManager); + this.jobSqlRunner = new JobSqlRunner(jobManager); + this.jobExecuteRunner = new JobExecuteRunner(jobManager); + this.jobUDFRunner = new JobUDFRunner(jobManager); + this.jobPrintRunner = new JobPrintRunner(jobManager); + this.jobDDLRunner = new JobDDLRunner(jobManager); + } + + public JobRunner getJobRunner(JobStatementType jobStatementType) { + switch (jobStatementType) { + case SET: + return jobSetRunner; + case ADD: + case ADD_FILE: + case ADD_JAR: + return jobAddRunner; + case SQL: + return jobSqlRunner; + case EXECUTE: + return jobExecuteRunner; + case UDF: + return jobUDFRunner; + case PRINT: + return jobPrintRunner; + case DDL: + default: + return jobDDLRunner; + } + } + + public static JobRunnerFactory create(JobManager jobManager) { + return new JobRunnerFactory(jobManager); + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/JobStatement.java b/dinky-core/src/main/java/org/dinky/job/JobStatement.java new file mode 100644 index 0000000000..085e376a5d --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/JobStatement.java @@ -0,0 +1,83 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.dinky.job; + +import org.dinky.parser.SqlType; + +public class JobStatement { + + private int index; + private String statement; + private JobStatementType statementType; + private SqlType sqlType; + private boolean isGenerated; + private boolean isFinalExecutableStatement; + + public JobStatement( + int index, String statement, JobStatementType statementType, SqlType sqlType, boolean isGenerated) { + this.index = index; + this.statement = statement; + this.statementType = statementType; + this.sqlType = sqlType; + this.isGenerated = isGenerated; + this.isFinalExecutableStatement = false; + } + + public JobStatement(int index, String statement, JobStatementType statementType, boolean isGenerated) { + this.index = index; + this.statement = statement; + this.statementType = statementType; + this.isGenerated = isGenerated; + this.isFinalExecutableStatement = false; + } + + public int getIndex() { + return index; + } + + public String getStatement() { + return statement; + } + + public JobStatementType getStatementType() { + return statementType; + } + + public SqlType getSqlType() { + return sqlType; + } + + public boolean isGenerated() { + return isGenerated; + } + + public boolean isFinalExecutableStatement() { + return isFinalExecutableStatement; + } + + public void asFinalExecutableStatement() { + isFinalExecutableStatement = true; + } + + public static JobStatement generateJobStatement( + int index, String statement, JobStatementType statementType, SqlType sqlType) { + return new JobStatement(index, statement, statementType, sqlType, true); + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/JobStatementPlan.java b/dinky-core/src/main/java/org/dinky/job/JobStatementPlan.java new file mode 100644 index 0000000000..dbb6e78df3 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/JobStatementPlan.java @@ -0,0 +1,60 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.dinky.job; + +import org.dinky.parser.SqlType; + +import java.util.ArrayList; +import java.util.List; + +public class JobStatementPlan { + + private final List<JobStatement> jobStatementList = new ArrayList<>(); + + public JobStatementPlan() {} + + public List<JobStatement> getJobStatementList() { + return jobStatementList; + } + + public void addJobStatement(String statement, JobStatementType statementType, SqlType sqlType) { + jobStatementList.add(new JobStatement(jobStatementList.size() + 1, statement, statementType, sqlType, false)); + } + + public void addJobStatementGenerated(String statement, JobStatementType statementType, SqlType sqlType) { + jobStatementList.add(new JobStatement(jobStatementList.size() + 1, statement, statementType, sqlType, true)); + } + + public void buildFinalExecutableStatement() { + if (jobStatementList.isEmpty()) { + return; + } + int index = -1; + for (int i = 0; i < jobStatementList.size(); i++) { + if (JobStatementType.SQL.equals(jobStatementList.get(i).getStatementType())) { + index = i; + } + } + if (index < 0) { + return; + } + jobStatementList.get(index).asFinalExecutableStatement(); + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/JobStatementType.java b/dinky-core/src/main/java/org/dinky/job/JobStatementType.java new file mode 100644 index 0000000000..8a89df4d05 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/JobStatementType.java @@ -0,0 +1,32 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.dinky.job; + +public enum JobStatementType { + SET, + DDL, + ADD, + ADD_FILE, + ADD_JAR, + SQL, + EXECUTE, + PRINT, + UDF; +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobAddRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobAddRunner.java new file mode 100644 index 0000000000..fc0e51bd72 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobAddRunner.java @@ -0,0 +1,76 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.dinky.job.runner; + +import org.dinky.executor.CustomTableEnvironment; +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.trans.parse.AddFileSqlParseStrategy; +import org.dinky.trans.parse.AddJarSqlParseStrategy; +import org.dinky.utils.URLUtils; + +import org.apache.flink.configuration.Configuration; +import org.apache.flink.core.fs.FileSystem; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobAddRunner extends AbstractJobRunner { + + public JobAddRunner(JobManager jobManager) { + this.jobManager = jobManager; + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + switch (jobStatement.getStatementType()) { + case ADD: + AddJarSqlParseStrategy.getAllFilePath(jobStatement.getStatement()) + .forEach(t -> jobManager.getUdfPathContextHolder().addOtherPlugins(t)); + (jobManager.getExecutor().getDinkyClassLoader()) + .addURLs(URLUtils.getURLs( + jobManager.getUdfPathContextHolder().getOtherPluginsFiles())); + break; + case ADD_FILE: + AddFileSqlParseStrategy.getAllFilePath(jobStatement.getStatement()) + .forEach(t -> jobManager.getUdfPathContextHolder().addFile(t)); + (jobManager.getExecutor().getDinkyClassLoader()) + .addURLs(URLUtils.getURLs( + jobManager.getUdfPathContextHolder().getFiles())); + break; + case ADD_JAR: + Configuration combinationConfig = getCombinationConfig(); + FileSystem.initialize(combinationConfig, null); + jobManager.getExecutor().executeSql(jobStatement.getStatement()); + break; + } + } + + private Configuration getCombinationConfig() { + CustomTableEnvironment cte = jobManager.getExecutor().getCustomTableEnvironment(); + Configuration rootConfig = cte.getRootConfiguration(); + Configuration config = cte.getConfig().getConfiguration(); + Configuration combinationConfig = new Configuration(); + combinationConfig.addAll(rootConfig); + combinationConfig.addAll(config); + return combinationConfig; + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobDDLRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobDDLRunner.java new file mode 100644 index 0000000000..7893f5f1a6 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobDDLRunner.java @@ -0,0 +1,80 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.dinky.job.runner; + +import org.dinky.assertion.Asserts; +import org.dinky.data.result.SqlExplainResult; +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.utils.LogUtil; +import org.dinky.utils.SqlUtil; + +import java.time.LocalDateTime; + +import cn.hutool.core.text.StrFormatter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobDDLRunner extends AbstractJobRunner { + + public JobDDLRunner(JobManager jobManager) { + this.jobManager = jobManager; + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + jobManager.getExecutor().executeSql(jobStatement.getStatement()); + } + + @Override + public SqlExplainResult explain(JobStatement jobStatement) { + SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); + try { + SqlExplainResult recordResult = jobManager.getExecutor().explainSqlRecord(jobStatement.getStatement()); + if (Asserts.isNull(recordResult)) { + return resultBuilder.isSkipped().build(); + } + resultBuilder = SqlExplainResult.newBuilder(recordResult); + // Flink DDL must actually be executed during explain so that the catalog objects exist for later statements. + run(jobStatement); + resultBuilder + .explainTrue(true) + .type(jobStatement.getSqlType().getType()) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + } catch (Exception e) { + String error = StrFormatter.format( + "Exception in explaining FlinkSQL:\n{}\n{}", + SqlUtil.addLineNumber(jobStatement.getStatement()), + LogUtil.getError(e)); + resultBuilder + .error(error) + .explainTrue(false) + .type(jobStatement.getSqlType().getType()) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + log.error(error); + } finally { + resultBuilder.explainTime(LocalDateTime.now()); + return resultBuilder.build(); + } + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobExecuteRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobExecuteRunner.java new file mode 100644 index 0000000000..1e462c978a --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobExecuteRunner.java @@ -0,0 +1,183 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.dinky.job.runner; + +import org.dinky.assertion.Asserts; +import org.dinky.data.result.IResult; +import org.dinky.data.result.InsertResult; +import org.dinky.data.result.ResultBuilder; +import org.dinky.data.result.SqlExplainResult; +import org.dinky.gateway.Gateway; +import org.dinky.gateway.result.GatewayResult; +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.Job; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.parser.SqlType; +import org.dinky.utils.FlinkStreamEnvironmentUtil; +import org.dinky.utils.LogUtil; +import org.dinky.utils.SqlUtil; +import org.dinky.utils.URLUtils; + +import org.apache.flink.core.execution.JobClient; +import org.apache.flink.runtime.jobgraph.JobGraph; +import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings; +import org.apache.flink.streaming.api.graph.StreamGraph; + +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.List; + +import cn.hutool.core.text.StrFormatter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobExecuteRunner extends AbstractJobRunner { + + private List statements; + + public JobExecuteRunner(JobManager jobManager) { + this.jobManager = jobManager; + this.statements = new ArrayList<>(); + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + statements.add(jobStatement.getStatement()); + jobManager.getExecutor().executeSql(jobStatement.getStatement()); + if (jobStatement.isFinalExecutableStatement()) { + if (jobManager.isUseGateway()) { + processWithGateway(); + } else { + processWithoutGateway(); + } + } + } + + @Override + public SqlExplainResult explain(JobStatement jobStatement) { + SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); + + try { + statements.add(jobStatement.getStatement()); + jobManager.getExecutor().executeSql(jobStatement.getStatement()); + if (jobStatement.isFinalExecutableStatement()) { + resultBuilder + .explain(FlinkStreamEnvironmentUtil.getStreamingPlanAsJSON( + jobManager.getExecutor().getStreamGraph())) + .type(jobStatement.getSqlType().getType()) + .parseTrue(true) + .explainTrue(true) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + } else { + resultBuilder + .sql(getParsedSql()) + .type(jobStatement.getSqlType().getType()) + .index(jobStatement.getIndex()) + .parseTrue(true) + .explainTrue(true) + .isSkipped(); + } + } catch (Exception e) { + String error = StrFormatter.format( + "Exception in explaining FlinkSQL:\n{}\n{}", + SqlUtil.addLineNumber(jobStatement.getStatement()), + LogUtil.getError(e)); + resultBuilder + .error(error) + .explainTrue(false) + .type(jobStatement.getSqlType().getType()) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + log.error(error); + } finally { + resultBuilder.explainTime(LocalDateTime.now()); + return resultBuilder.build(); + } + } + + private void processWithGateway() throws Exception { + GatewayResult gatewayResult = null; + jobManager.getConfig().addGatewayConfig(jobManager.getExecutor().getSetConfig()); + + if (jobManager.getRunMode().isApplicationMode()) { + jobManager.getConfig().getGatewayConfig().setSql(getParsedSql()); + gatewayResult = Gateway.build(jobManager.getConfig().getGatewayConfig()) + .submitJar(jobManager.getExecutor().getDinkyClassLoader().getUdfPathContextHolder()); + } else { + StreamGraph streamGraph = jobManager.getExecutor().getStreamGraph(); + streamGraph.setJobName(jobManager.getConfig().getJobName()); + JobGraph 
jobGraph = streamGraph.getJobGraph(); + if (Asserts.isNotNullString(jobManager.getConfig().getSavePointPath())) { + jobGraph.setSavepointRestoreSettings( + SavepointRestoreSettings.forPath(jobManager.getConfig().getSavePointPath(), true)); + } + gatewayResult = + Gateway.build(jobManager.getConfig().getGatewayConfig()).submitJobGraph(jobGraph); + } + jobManager.getJob().setResult(InsertResult.success(gatewayResult.getId())); + jobManager.getJob().setJobId(gatewayResult.getId()); + jobManager.getJob().setJids(gatewayResult.getJids()); + jobManager.getJob().setJobManagerAddress(URLUtils.formatAddress(gatewayResult.getWebURL())); + + if (gatewayResult.isSuccess()) { + jobManager.getJob().setStatus(Job.JobStatus.SUCCESS); + } else { + jobManager.getJob().setStatus(Job.JobStatus.FAILED); + jobManager.getJob().setError(gatewayResult.getError()); + } + } + + private void processWithoutGateway() throws Exception { + JobClient jobClient = + jobManager.getExecutor().executeAsync(jobManager.getConfig().getJobName()); + if (Asserts.isNotNull(jobClient)) { + jobManager.getJob().setJobId(jobClient.getJobID().toHexString()); + jobManager.getJob().setJids(new ArrayList() { + { + add(jobManager.getJob().getJobId()); + } + }); + } + if (jobManager.getConfig().isUseResult()) { + IResult result = ResultBuilder.build( + SqlType.EXECUTE, + jobManager.getJob().getId().toString(), + jobManager.getConfig().getMaxRowNum(), + jobManager.getConfig().isUseChangeLog(), + jobManager.getConfig().isUseAutoCancel(), + jobManager.getExecutor().getTimeZone()) + .getResult(null); + jobManager.getJob().setResult(result); + } + } + + private String getParsedSql() { + StringBuilder sb = new StringBuilder(); + for (String statement : statements) { + if (sb.length() > 0) { + sb.append(";\n"); + } + sb.append(statement); + } + return sb.toString(); + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobJarRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobJarRunner.java new file mode 100644 index 0000000000..4368313ba7 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobJarRunner.java @@ -0,0 +1,233 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.dinky.job.runner; + +import org.dinky.assertion.Asserts; +import org.dinky.classloader.DinkyClassLoader; +import org.dinky.data.exception.DinkyException; +import org.dinky.data.result.InsertResult; +import org.dinky.data.result.SqlExplainResult; +import org.dinky.gateway.Gateway; +import org.dinky.gateway.config.GatewayConfig; +import org.dinky.gateway.result.GatewayResult; +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.Job; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.parser.SqlType; +import org.dinky.trans.Operations; +import org.dinky.trans.ddl.CustomSetOperation; +import org.dinky.trans.dml.ExecuteJarOperation; +import org.dinky.trans.parse.AddFileSqlParseStrategy; +import org.dinky.trans.parse.AddJarSqlParseStrategy; +import org.dinky.trans.parse.ExecuteJarParseStrategy; +import org.dinky.trans.parse.SetSqlParseStrategy; +import org.dinky.utils.DinkyClassLoaderUtil; +import org.dinky.utils.FlinkStreamEnvironmentUtil; +import org.dinky.utils.LogUtil; +import org.dinky.utils.SqlUtil; +import org.dinky.utils.URLUtils; + +import org.apache.flink.api.common.Plan; +import org.apache.flink.api.dag.Pipeline; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.PipelineOptions; +import org.apache.flink.core.execution.JobClient; +import org.apache.flink.runtime.jobgraph.JobGraph; +import org.apache.flink.runtime.jobgraph.SavepointConfigOptions; +import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings; +import org.apache.flink.streaming.api.graph.StreamGraph; + +import java.io.File; +import java.net.URL; +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import cn.hutool.core.lang.Assert; +import cn.hutool.core.text.StrFormatter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobJarRunner extends AbstractJobRunner { + + private final Configuration configuration; + + public JobJarRunner(JobManager jobManager) { + this.jobManager = jobManager; + configuration = + jobManager.getExecutor().getCustomTableEnvironment().getConfig().getConfiguration(); + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + if (!jobManager.isUseGateway()) { + submitNormal(jobStatement); + } else { + GatewayResult gatewayResult; + if (jobManager.getRunMode().isApplicationMode()) { + gatewayResult = submitGateway(jobStatement); + } else { + gatewayResult = submitNormalWithGateway(jobStatement); + } + jobManager.getJob().setResult(InsertResult.success(gatewayResult.getId())); + jobManager.getJob().setJobId(gatewayResult.getId()); + jobManager.getJob().setJids(gatewayResult.getJids()); + jobManager.getJob().setJobManagerAddress(URLUtils.formatAddress(gatewayResult.getWebURL())); + + if (gatewayResult.isSuccess()) { + jobManager.getJob().setStatus(Job.JobStatus.SUCCESS); + } else { + jobManager.getJob().setStatus(Job.JobStatus.FAILED); + jobManager.getJob().setError(gatewayResult.getError()); + log.error(gatewayResult.getError()); + } + } + } + + @Override + public SqlExplainResult explain(JobStatement jobStatement) { + SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); + + try { + // Execute task does not support statement set. 
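+ // Only the first EXECUTE JAR statement in the script is used to build this pipeline (see getJarStreamGraph below).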
+ Pipeline pipeline = getPipeline(jobStatement); + resultBuilder + .explain(FlinkStreamEnvironmentUtil.getStreamingPlanAsJSON(pipeline)) + .type(jobStatement.getSqlType().getType()) + .parseTrue(true) + .explainTrue(true) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + } catch (Exception e) { + String error = StrFormatter.format( + "Exception in explaining FlinkSQL:\n{}\n{}", + SqlUtil.addLineNumber(jobStatement.getStatement()), + LogUtil.getError(e)); + resultBuilder + .error(error) + .explainTrue(false) + .type(jobStatement.getSqlType().getType()) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + log.error(error); + } finally { + resultBuilder.explainTime(LocalDateTime.now()); + return resultBuilder.build(); + } + } + + private GatewayResult submitGateway(JobStatement jobStatement) throws Exception { + configuration.set(PipelineOptions.JARS, getUris(jobStatement.getStatement())); + jobManager.getConfig().addGatewayConfig(configuration); + jobManager.getConfig().getGatewayConfig().setSql(jobStatement.getStatement()); + return Gateway.build(jobManager.getConfig().getGatewayConfig()).submitJar(jobManager.getUdfPathContextHolder()); + } + + private GatewayResult submitNormalWithGateway(JobStatement jobStatement) { + Pipeline pipeline = getPipeline(jobStatement); + if (pipeline instanceof StreamGraph) { + ((StreamGraph) pipeline).setJobName(jobManager.getConfig().getJobName()); + } else if (pipeline instanceof Plan) { + ((Plan) pipeline).setJobName(jobManager.getConfig().getJobName()); + } + JobGraph jobGraph = FlinkStreamEnvironmentUtil.getJobGraph(pipeline, configuration); + GatewayConfig gatewayConfig = jobManager.getConfig().getGatewayConfig(); + List uriList = getUris(jobStatement.getStatement()); + String[] jarPaths = uriList.stream() + .map(URLUtils::toFile) + .map(File::getAbsolutePath) + .toArray(String[]::new); + gatewayConfig.setJarPaths(jarPaths); + return Gateway.build(gatewayConfig).submitJobGraph(jobGraph); + } + + private Pipeline getPipeline(JobStatement jobStatement) { + Pipeline pipeline = getJarStreamGraph(jobStatement.getStatement(), jobManager.getDinkyClassLoader()); + if (pipeline instanceof StreamGraph) { + if (Asserts.isNotNullString(jobManager.getConfig().getSavePointPath())) { + ((StreamGraph) pipeline) + .setSavepointRestoreSettings(SavepointRestoreSettings.forPath( + jobManager.getConfig().getSavePointPath(), + configuration.get(SavepointConfigOptions.SAVEPOINT_IGNORE_UNCLAIMED_STATE))); + } + } + return pipeline; + } + + private void submitNormal(JobStatement jobStatement) throws Exception { + JobClient jobClient = FlinkStreamEnvironmentUtil.executeAsync( + getPipeline(jobStatement), jobManager.getExecutor().getStreamExecutionEnvironment()); + if (Asserts.isNotNull(jobClient)) { + jobManager.getJob().setJobId(jobClient.getJobID().toHexString()); + jobManager.getJob().setJids(new ArrayList() { + { + add(jobManager.getJob().getJobId()); + } + }); + jobManager.getJob().setStatus(Job.JobStatus.SUCCESS); + } else { + jobManager.getJob().setStatus(Job.JobStatus.FAILED); + } + } + + public Pipeline getJarStreamGraph(String statement, DinkyClassLoader dinkyClassLoader) { + DinkyClassLoaderUtil.initClassLoader(jobManager.getConfig(), dinkyClassLoader); + String[] statements = SqlUtil.getStatements(statement); + ExecuteJarOperation executeJarOperation = null; + for (String sql : statements) { + String sqlStatement = jobManager.getExecutor().pretreatStatement(sql); + if (ExecuteJarParseStrategy.INSTANCE.match(sqlStatement)) { 
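+ // The first EXECUTE JAR statement wins; the break below ignores any statements that follow it.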
+ executeJarOperation = new ExecuteJarOperation(sqlStatement); + break; + } + SqlType operationType = Operations.getOperationType(sqlStatement); + if (operationType.equals(SqlType.SET) && SetSqlParseStrategy.INSTANCE.match(sqlStatement)) { + CustomSetOperation customSetOperation = new CustomSetOperation(sqlStatement); + customSetOperation.execute(jobManager.getExecutor().getCustomTableEnvironment()); + } else if (operationType.equals(SqlType.ADD)) { + Set<File> files = AddJarSqlParseStrategy.getAllFilePath(sqlStatement); + files.forEach(jobManager.getExecutor()::addJar); + files.forEach(jobManager.getUdfPathContextHolder()::addOtherPlugins); + } else if (operationType.equals(SqlType.ADD_FILE)) { + Set<File> files = AddFileSqlParseStrategy.getAllFilePath(sqlStatement); + files.forEach(jobManager.getExecutor()::addJar); + files.forEach(jobManager.getUdfPathContextHolder()::addFile); + } + } + Assert.notNull(executeJarOperation, () -> new DinkyException("No EXECUTE JAR operation found.")); + List<URL> urls = jobManager.getAllFileSet(); + return executeJarOperation.explain(jobManager.getExecutor().getCustomTableEnvironment(), urls); + } + + public List<String> getUris(String statement) { + String[] statements = SqlUtil.getStatements(statement); + List<String> uriList = new ArrayList<>(); + for (String sql : statements) { + String sqlStatement = jobManager.getExecutor().pretreatStatement(sql); + if (ExecuteJarParseStrategy.INSTANCE.match(sqlStatement)) { + uriList.add(ExecuteJarParseStrategy.getInfo(sqlStatement).getUri()); + break; + } + } + return uriList; + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobPrintRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobPrintRunner.java new file mode 100644 index 0000000000..121fe4e873 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobPrintRunner.java @@ -0,0 +1,100 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.dinky.job.runner; + +import org.dinky.data.result.SqlExplainResult; +import org.dinky.explainer.print_table.PrintStatementExplainer; +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.job.JobStatementType; +import org.dinky.parser.SqlType; +import org.dinky.utils.IpUtil; +import org.dinky.utils.LogUtil; +import org.dinky.utils.SqlUtil; + +import java.time.LocalDateTime; +import java.util.Map; + +import cn.hutool.core.text.StrFormatter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobPrintRunner extends AbstractJobRunner { + + public JobPrintRunner(JobManager jobManager) { + this.jobManager = jobManager; + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + Map config = + jobManager.getExecutor().getExecutorConfig().getConfig(); + String host = config.getOrDefault("dinky.dinkyHost", IpUtil.getHostIp()); + int port = Integer.parseInt(config.getOrDefault("dinky.dinkyPrintPort", "7125")); + String[] tableNames = PrintStatementExplainer.getTableNames(jobStatement.getStatement()); + for (String tableName : tableNames) { + String ctasStatement = PrintStatementExplainer.getCreateStatement(tableName, host, port); + JobStatement ctasJobStatement = JobStatement.generateJobStatement( + jobStatement.getIndex(), ctasStatement, JobStatementType.SQL, SqlType.CTAS); + JobSqlRunner jobSqlRunner = new JobSqlRunner(jobManager); + jobSqlRunner.run(ctasJobStatement); + } + } + + @Override + public SqlExplainResult explain(JobStatement jobStatement) { + SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); + try { + Map config = + jobManager.getExecutor().getExecutorConfig().getConfig(); + String host = config.getOrDefault("dinky.dinkyHost", IpUtil.getHostIp()); + int port = Integer.parseInt(config.getOrDefault("dinky.dinkyPrintPort", "7125")); + String[] tableNames = PrintStatementExplainer.getTableNames(jobStatement.getStatement()); + for (String tableName : tableNames) { + String ctasStatement = PrintStatementExplainer.getCreateStatement(tableName, host, port); + JobStatement ctasJobStatement = JobStatement.generateJobStatement( + jobStatement.getIndex(), ctasStatement, JobStatementType.SQL, SqlType.CTAS); + JobSqlRunner jobSqlRunner = new JobSqlRunner(jobManager); + jobSqlRunner.explain(ctasJobStatement); + } + resultBuilder + .explainTrue(true) + .type(jobStatement.getSqlType().getType()) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + } catch (Exception e) { + String error = StrFormatter.format( + "Exception in explaining FlinkSQL:\n{}\n{}", + SqlUtil.addLineNumber(jobStatement.getStatement()), + LogUtil.getError(e)); + resultBuilder + .error(error) + .explainTrue(false) + .type(jobStatement.getSqlType().getType()) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + log.error(error); + } finally { + resultBuilder.explainTime(LocalDateTime.now()); + return resultBuilder.build(); + } + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobSetRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobSetRunner.java new file mode 100644 index 0000000000..740dab28b9 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobSetRunner.java @@ -0,0 +1,47 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.dinky.job.runner; + +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.parser.SqlType; +import org.dinky.trans.ddl.CustomSetOperation; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobSetRunner extends AbstractJobRunner { + + public JobSetRunner(JobManager jobManager) { + this.jobManager = jobManager; + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + if (SqlType.SET.equals(jobStatement.getSqlType())) { + CustomSetOperation customSetOperation = new CustomSetOperation(jobStatement.getStatement()); + customSetOperation.execute(jobManager.getExecutor().getCustomTableEnvironment()); + } else if (SqlType.RESET.equals(jobStatement.getSqlType())) { + // todo: reset + throw new RuntimeException("RESET is not supported yet."); + } + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobSqlRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobSqlRunner.java new file mode 100644 index 0000000000..48c93afd78 --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobSqlRunner.java @@ -0,0 +1,300 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package org.dinky.job.runner; + +import org.dinky.assertion.Asserts; +import org.dinky.data.enums.GatewayType; +import org.dinky.data.result.IResult; +import org.dinky.data.result.InsertResult; +import org.dinky.data.result.ResultBuilder; +import org.dinky.data.result.SqlExplainResult; +import org.dinky.executor.Executor; +import org.dinky.gateway.Gateway; +import org.dinky.gateway.result.GatewayResult; +import org.dinky.interceptor.FlinkInterceptor; +import org.dinky.interceptor.FlinkInterceptorResult; +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.Job; +import org.dinky.job.JobConfig; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.parser.SqlType; +import org.dinky.utils.LogUtil; +import org.dinky.utils.SqlUtil; +import org.dinky.utils.URLUtils; + +import org.apache.flink.runtime.jobgraph.JobGraph; +import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings; +import org.apache.flink.runtime.rest.messages.JobPlanInfo; +import org.apache.flink.streaming.api.graph.StreamGraph; +import org.apache.flink.table.api.TableResult; +import org.apache.flink.table.operations.CollectModifyOperation; +import org.apache.flink.table.operations.CreateTableASOperation; +import org.apache.flink.table.operations.ModifyOperation; +import org.apache.flink.table.operations.Operation; +import org.apache.flink.table.operations.QueryOperation; +import org.apache.flink.table.operations.ReplaceTableAsOperation; + +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import cn.hutool.core.text.StrFormatter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobSqlRunner extends AbstractJobRunner { + + private List statements; + + public JobSqlRunner(JobManager jobManager) { + this.jobManager = jobManager; + this.statements = new ArrayList<>(); + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + if (jobManager.isUseStatementSet()) { + handleStatementSet(jobStatement); + } else { + handleNonStatementSet(jobStatement); + } + } + + @Override + public SqlExplainResult explain(JobStatement jobStatement) { + SqlExplainResult.Builder resultBuilder = SqlExplainResult.Builder.newBuilder(); + + try { + Operation operation = jobManager.getExecutor().getOperationFromStatement(jobStatement.getStatement()); + if (operation instanceof CreateTableASOperation) { + CreateTableASOperation createTableASOperation = (CreateTableASOperation) operation; + jobManager.getExecutor().executeOperation(createTableASOperation.getCreateTableOperation()); + jobManager + .getExecutor() + .addModifyOperations(createTableASOperation.toSinkModifyOperation( + jobManager.getExecutor().getCatalogManager())); + } else if (operation instanceof ReplaceTableAsOperation) { + ReplaceTableAsOperation replaceTableAsOperation = (ReplaceTableAsOperation) operation; + jobManager + .getExecutor() + .addModifyOperations(replaceTableAsOperation.toSinkModifyOperation( + jobManager.getExecutor().getCatalogManager())); + } else if (operation instanceof QueryOperation) { + jobManager.getExecutor().addModifyOperations(new CollectModifyOperation((QueryOperation) operation)); + } else if (operation instanceof ModifyOperation) { + jobManager.getExecutor().addModifyOperations((ModifyOperation) operation); + } + statements.add(jobStatement.getStatement()); + if (jobStatement.isFinalExecutableStatement()) { + SqlExplainResult sqlExplainResult = 
jobManager.getExecutor().explainModifyOperations(); + resultBuilder = SqlExplainResult.newBuilder(sqlExplainResult); + resultBuilder.sql(getParsedSql()).index(jobStatement.getIndex()); + } else { + resultBuilder + .sql(getParsedSql()) + .type(jobStatement.getSqlType().getType()) + .parseTrue(true) + .explainTrue(true) + .index(jobStatement.getIndex()) + .isSkipped(); + } + } catch (Exception e) { + String error = StrFormatter.format( + "Exception in explaining FlinkSQL:\n{}\n{}", + SqlUtil.addLineNumber(jobStatement.getStatement()), + LogUtil.getError(e)); + resultBuilder + .error(error) + .explainTrue(false) + .type(jobStatement.getSqlType().getType()) + .sql(jobStatement.getStatement()) + .index(jobStatement.getIndex()); + log.error(error); + } finally { + resultBuilder.explainTime(LocalDateTime.now()); + return resultBuilder.build(); + } + } + + public StreamGraph getStreamGraph(JobStatement jobStatement) { + addModifyOperations(jobStatement); + if (jobStatement.isFinalExecutableStatement()) { + jobManager.getExecutor().addOperatorFromModifyOperations(); + return jobManager.getExecutor().getStreamGraph(); + } + return null; + } + + public JobPlanInfo getJobPlanInfo(JobStatement jobStatement) { + addModifyOperations(jobStatement); + if (jobStatement.isFinalExecutableStatement()) { + jobManager.getExecutor().addOperatorFromModifyOperations(); + return jobManager.getExecutor().getJobPlanInfo(); + } + return null; + } + + private void addModifyOperations(JobStatement jobStatement) { + Operation operation = jobManager.getExecutor().getOperationFromStatement(jobStatement.getStatement()); + if (operation instanceof ModifyOperation) { + jobManager.getExecutor().addModifyOperations((ModifyOperation) operation); + } else if (operation instanceof QueryOperation) { + jobManager.getExecutor().addModifyOperations(new CollectModifyOperation((QueryOperation) operation)); + } + } + + private void handleStatementSet(JobStatement jobStatement) throws Exception { + if (jobManager.isUseGateway()) { + processWithGateway(jobStatement); + } else { + processWithoutGateway(jobStatement); + } + } + + private void handleNonStatementSet(JobStatement jobStatement) throws Exception { + if (jobManager.isUseGateway()) { + processWithGateway(jobStatement); + } else { + processFirstStatement(jobStatement); + } + } + + private void processWithGateway(JobStatement jobStatement) throws Exception { + GatewayResult gatewayResult = submitByGateway(jobStatement); + if (Asserts.isNotNull(gatewayResult)) { + setJobResultFromGatewayResult(gatewayResult); + } + } + + private void processWithoutGateway(JobStatement jobStatement) throws Exception { + Operation operation = jobManager.getExecutor().getOperationFromStatement(jobStatement.getStatement()); + if (operation instanceof ModifyOperation) { + jobManager.getExecutor().addModifyOperations((ModifyOperation) operation); + statements.add(jobStatement.getStatement()); + } else if (operation instanceof QueryOperation) { + log.info("Select statement is skipped when execute sink task in application mode."); + } + if (jobStatement.isFinalExecutableStatement()) { + TableResult tableResult = jobManager.getExecutor().executeModifyOperations(); + updateJobWithTableResult(tableResult); + } + } + + private void processFirstStatement(JobStatement jobStatement) throws Exception { + processSingleStatement(jobStatement); + } + + private void processSingleStatement(JobStatement jobStatement) throws Exception { + FlinkInterceptorResult flinkInterceptorResult = + 
FlinkInterceptor.build(jobManager.getExecutor(), jobStatement.getStatement()); + if (Asserts.isNotNull(flinkInterceptorResult.getTableResult())) { + updateJobWithTableResult(flinkInterceptorResult.getTableResult(), jobStatement.getSqlType()); + } else if (!flinkInterceptorResult.isNoExecute()) { + TableResult tableResult = jobManager.getExecutor().executeSql(jobStatement.getStatement()); + updateJobWithTableResult(tableResult, jobStatement.getSqlType()); + } + } + + private void setJobResultFromGatewayResult(GatewayResult gatewayResult) { + jobManager.getJob().setResult(InsertResult.success(gatewayResult.getId())); + jobManager.getJob().setJobId(gatewayResult.getId()); + jobManager.getJob().setJids(gatewayResult.getJids()); + jobManager.getJob().setJobManagerAddress(URLUtils.formatAddress(gatewayResult.getWebURL())); + jobManager.getJob().setStatus(gatewayResult.isSuccess() ? Job.JobStatus.SUCCESS : Job.JobStatus.FAILED); + if (!gatewayResult.isSuccess()) { + jobManager.getJob().setError(gatewayResult.getError()); + } + } + + private void updateJobWithTableResult(TableResult tableResult) { + updateJobWithTableResult(tableResult, SqlType.INSERT); + } + + private void updateJobWithTableResult(TableResult tableResult, SqlType sqlType) { + if (tableResult.getJobClient().isPresent()) { + jobManager + .getJob() + .setJobId(tableResult.getJobClient().get().getJobID().toHexString()); + jobManager + .getJob() + .setJids(Collections.singletonList(jobManager.getJob().getJobId())); + } else if (!sqlType.getCategory().getHasJobClient()) { + jobManager.getJob().setJobId(UUID.randomUUID().toString().replace("-", "")); + jobManager + .getJob() + .setJids(Collections.singletonList(jobManager.getJob().getJobId())); + } + + if (jobManager.getConfig().isUseResult()) { + IResult result = ResultBuilder.build( + sqlType, + jobManager.getJob().getId().toString(), + jobManager.getConfig().getMaxRowNum(), + jobManager.getConfig().isUseChangeLog(), + jobManager.getConfig().isUseAutoCancel(), + jobManager.getExecutor().getTimeZone()) + .getResultWithPersistence(tableResult, jobManager.getHandler()); + jobManager.getJob().setResult(result); + } + } + + private GatewayResult submitByGateway(JobStatement jobStatement) { + GatewayResult gatewayResult = null; + + JobConfig config = jobManager.getConfig(); + GatewayType runMode = jobManager.getRunMode(); + Executor executor = jobManager.getExecutor(); + + statements.add(jobStatement.getStatement()); + // Submitting via a gateway requires building the gateway config, including the Flink configuration. + config.addGatewayConfig(executor.getCustomTableEnvironment().getConfig().getConfiguration()); + + if (runMode.isApplicationMode()) { + if (!jobStatement.isFinalExecutableStatement()) { + return gatewayResult; + } + // Application mode needs to submit the dinky-app.jar stored in HDFS or bundled in the image. + config.getGatewayConfig().setSql(getParsedSql()); + gatewayResult = Gateway.build(config.getGatewayConfig()) + .submitJar(executor.getDinkyClassLoader().getUdfPathContextHolder()); + } else { + ModifyOperation modifyOperation = executor.getModifyOperationFromInsert(jobStatement.getStatement()); + jobManager.getExecutor().addModifyOperations(modifyOperation); + if (!jobStatement.isFinalExecutableStatement()) { + return gatewayResult; + } + JobGraph jobGraph = executor.getStreamGraphModifyOperations().getJobGraph(); + // Per-job mode needs to set the savepoint restore path when recovering from a savepoint.
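+ // The second argument to SavepointRestoreSettings.forPath allows non-restored state when resuming.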
+ if (Asserts.isNotNullString(config.getSavePointPath())) { + jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(config.getSavePointPath(), true)); + } + // Per-job mode submits the job graph directly. + gatewayResult = Gateway.build(config.getGatewayConfig()).submitJobGraph(jobGraph); + } + return gatewayResult; + } + + private String getParsedSql() { + return String.join(";\n", statements); + } +} diff --git a/dinky-core/src/main/java/org/dinky/job/runner/JobUDFRunner.java b/dinky-core/src/main/java/org/dinky/job/runner/JobUDFRunner.java new file mode 100644 index 0000000000..b71a09b88a --- /dev/null +++ b/dinky-core/src/main/java/org/dinky/job/runner/JobUDFRunner.java @@ -0,0 +1,124 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.dinky.job.runner; + +import static org.dinky.function.util.UDFUtil.GATEWAY_TYPE_MAP; +import static org.dinky.function.util.UDFUtil.SESSION; +import static org.dinky.function.util.UDFUtil.YARN; + +import org.dinky.data.model.SystemConfiguration; +import org.dinky.function.data.model.UDF; +import org.dinky.function.util.UDFUtil; +import org.dinky.job.AbstractJobRunner; +import org.dinky.job.JobManager; +import org.dinky.job.JobStatement; +import org.dinky.utils.URLUtils; + +import java.io.File; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import cn.hutool.core.collection.CollUtil; +import cn.hutool.core.util.ArrayUtil; +import cn.hutool.core.util.RandomUtil; +import cn.hutool.core.util.StrUtil; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class JobUDFRunner extends AbstractJobRunner { + + public JobUDFRunner(JobManager jobManager) { + this.jobManager = jobManager; + } + + @Override + public void run(JobStatement jobStatement) throws Exception { + List<UDF> udfList = jobManager.getJobParam().getUdfList(); + Integer taskId = jobManager.getConfig().getTaskId(); + if (taskId == null) { + taskId = -RandomUtil.randomInt(0, 1000); + } + // 1.
Obtain the paths of the jar packages and inject them into the remote environment + List<File> jarFiles = + new ArrayList<>(jobManager.getUdfPathContextHolder().getAllFileSet()); + + String[] userCustomUdfJarPath = UDFUtil.initJavaUDF(udfList, taskId); + String[] jarPaths = CollUtil.removeNull(jarFiles).stream() + .map(File::getAbsolutePath) + .toArray(String[]::new); + if (GATEWAY_TYPE_MAP.get(SESSION).contains(jobManager.getRunMode())) { + jobManager.getConfig().setJarFiles(jarPaths); + } + + // 2. Compile Python UDFs + String[] pyPaths = UDFUtil.initPythonUDF( + udfList, + jobManager.getRunMode(), + jobManager.getConfig().getTaskId(), + jobManager.getExecutor().getTableConfig().getConfiguration()); + + jobManager.getExecutor().initUDF(userCustomUdfJarPath); + jobManager.getExecutor().initUDF(jarPaths); + + if (ArrayUtil.isNotEmpty(pyPaths)) { + for (String pyPath : pyPaths) { + if (StrUtil.isNotBlank(pyPath)) { + jarFiles.add(new File(pyPath)); + jobManager.getUdfPathContextHolder().addPyUdfPath(new File(pyPath)); + } + } + } + if (ArrayUtil.isNotEmpty(userCustomUdfJarPath)) { + for (String jarPath : userCustomUdfJarPath) { + if (StrUtil.isNotBlank(jarPath)) { + jarFiles.add(new File(jarPath)); + jobManager.getUdfPathContextHolder().addUdfPath(new File(jarPath)); + } + } + } + + Set<File> pyUdfFile = jobManager.getUdfPathContextHolder().getPyUdfFile(); + jobManager + .getExecutor() + .initPyUDF( + SystemConfiguration.getInstances().getPythonHome(), + pyUdfFile.stream().map(File::getAbsolutePath).toArray(String[]::new)); + if (GATEWAY_TYPE_MAP.get(YARN).contains(jobManager.getRunMode())) { + jobManager.getConfig().getGatewayConfig().setJarPaths(ArrayUtil.append(jarPaths, pyPaths)); + } + + try { + List<URL> jarList = CollUtil.newArrayList(URLUtils.getURLs(jarFiles)); + // 3. Write the files required by the UDFs + UDFUtil.writeManifest(taskId, jarList, jobManager.getUdfPathContextHolder()); + UDFUtil.addConfigurationClsAndJars( + jobManager.getExecutor().getCustomTableEnvironment(), + jarList, + CollUtil.newArrayList(URLUtils.getURLs(jarFiles))); + } catch (Exception e) { + throw new RuntimeException("Failed to add UDF configuration.", e); + } + + log.info(StrUtil.format("A total of {} UDFs have been initialized.", udfList.size() + pyUdfFile.size())); + log.info("Finished initializing Flink UDFs."); + } +} diff --git a/dinky-core/src/test/java/org/dinky/core/JobManagerTest.java b/dinky-core/src/test/java/org/dinky/core/JobManagerTest.java deleted file mode 100644 index 978a92024b..0000000000 --- a/dinky-core/src/test/java/org/dinky/core/JobManagerTest.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
diff --git a/dinky-core/src/test/java/org/dinky/core/JobManagerTest.java b/dinky-core/src/test/java/org/dinky/core/JobManagerTest.java
deleted file mode 100644
index 978a92024b..0000000000
--- a/dinky-core/src/test/java/org/dinky/core/JobManagerTest.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.dinky.core;
-
-import org.dinky.data.enums.GatewayType;
-import org.dinky.data.result.ResultPool;
-import org.dinky.data.result.SelectResult;
-import org.dinky.job.JobConfig;
-import org.dinky.job.JobManager;
-import org.dinky.job.JobResult;
-
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * JobManagerTest
- *
- * @since 2021/6/3
- */
-@Ignore
-public class JobManagerTest {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(JobManagerTest.class);
-
-    @Ignore
-    @Test
-    public void cancelJobSelect() throws Exception {
-        JobConfig config = JobConfig.builder()
-                .type(GatewayType.YARN_SESSION.getLongValue())
-                .useResult(true)
-                .useChangeLog(true)
-                .useAutoCancel(true)
-                .clusterId(2)
-                .jobName("Test")
-                .fragment(false)
-                .statementSet(false)
-                .batchModel(false)
-                .maxRowNum(100)
-                .parallelism(1)
-                .build();
-        if (config.isUseRemote()) {
-            config.setAddress("192.168.123.157:8081");
-        }
-        JobManager jobManager = JobManager.build(config);
-        String sql1 = "CREATE TABLE Orders (\n"
-                + " order_number BIGINT,\n"
-                + " price DECIMAL(32,2),\n"
-                + " order_time TIMESTAMP(3)\n"
-                + ") WITH (\n"
-                + " 'connector' = 'datagen',\n"
-                + " 'rows-per-second' = '1'\n"
-                + ");";
-        String sql3 = "select order_number,price,order_time from Orders";
-        String sql = sql1 + sql3;
-        JobResult result = jobManager.executeSql(sql);
-        SelectResult selectResult = ResultPool.get(result.getJobId());
-        LOGGER.info("sql:{}, execute result:{}", sql, result.isSuccess());
-    }
-}
diff --git a/dinky-core/src/test/java/org/dinky/job/JobManagerTest.java b/dinky-core/src/test/java/org/dinky/job/JobManagerTest.java
new file mode 100644
index 0000000000..46f90a03fd
--- /dev/null
+++ b/dinky-core/src/test/java/org/dinky/job/JobManagerTest.java
@@ -0,0 +1,206 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.dinky.job;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+import org.dinky.data.enums.GatewayType;
+import org.dinky.data.result.ExplainResult;
+import org.dinky.executor.ExecutorConfig;
+import org.dinky.explainer.lineage.LineageBuilder;
+import org.dinky.explainer.lineage.LineageResult;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.flink.shaded.guava31.com.google.common.io.Resources;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+import org.junit.jupiter.api.Test;
+
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+class JobManagerTest {
+
+    private JobConfig config;
+
+    private JobManager jobManager;
+
+    private final ObjectMapper mapper = new ObjectMapper();
+
+    void initLocalStreamPlanEnvironment() {
+        config = JobConfig.builder()
+                .fragment(true)
+                .statementSet(true)
+                .type(GatewayType.LOCAL.getLongValue())
+                .parallelism(1)
+                .maxRowNum(100)
+                .useAutoCancel(true)
+                .useChangeLog(false)
+                .useRemote(false)
+                .useResult(false)
+                .batchModel(false)
+                .jobName("Test")
+                .checkpoint(1000)
+                .build();
+        jobManager = JobManager.buildPlanMode(config);
+    }
+
+    void initLocalBatchPlanEnvironment() {
+        config = JobConfig.builder()
+                .fragment(true)
+                .statementSet(true)
+                .type(GatewayType.LOCAL.getLongValue())
+                .parallelism(1)
+                .maxRowNum(100)
+                .useAutoCancel(true)
+                .useChangeLog(false)
+                .useRemote(false)
+                .useResult(false)
+                .batchModel(true)
+                .jobName("Test")
+                .checkpoint(1000)
+                .build();
+        jobManager = JobManager.buildPlanMode(config);
+    }
+
+    @Test
+    void testExplainSql() throws Exception {
+        checkExplainStreamSqlFromFile("flink/sql/statement-set-stream.sql", 16);
+        checkExplainBatchSqlFromFile("flink/sql/statement-set-batch.sql", 16);
+    }
+
+    @Test
+    void testGetStreamGraph() throws Exception {
+        checkGetStreamGraphFromFile("flink/sql/statement-set-stream.sql", 21);
+        checkGetBatchStreamGraphFromFile("flink/sql/statement-set-batch.sql", 23);
+    }
+
+    @Test
+    void testGetJobPlanJson() throws Exception {
+        checkGetStreamJobPlanJsonFromFile("flink/sql/statement-set-stream.sql");
+        checkGetBatchJobPlanJsonFromFile("flink/sql/statement-set-batch.sql");
+    }
+
+    @Test
+    void testExecuteSql() throws Exception {
+        checkStreamExecuteSqlFromFile("flink/sql/statement-set-stream.sql");
+        checkBatchExecuteSqlFromFile("flink/sql/statement-set-batch.sql");
+    }
+
+    @Test
+    void testLineageSqlSingle() throws Exception {
+        String statement =
+                IOUtils.toString(Resources.getResource("flink/sql/single-insert.sql"), StandardCharsets.UTF_8);
+        LineageResult result = LineageBuilder.getColumnLineageByLogicalPlan(statement, ExecutorConfig.DEFAULT);
+        assertNotNull(result);
+        assertEquals(2, result.getTables().size());
+        assertEquals(4, result.getRelations().size());
+    }
+
+    private void checkExplainStreamSqlFromFile(String path, int total) throws IOException {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalStreamPlanEnvironment();
+        checkExplainSql(statement, total);
+        jobManager.close();
+    }
+
+    private void checkExplainBatchSqlFromFile(String path, int total) throws IOException {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalBatchPlanEnvironment();
+        checkExplainSql(statement, total);
+        jobManager.close();
+    }
+
+    private void checkExplainSql(String statement, int total) throws IOException {
+        ExplainResult explainResult = jobManager.explainSql(statement);
+        assertNotNull(explainResult);
+        assertTrue(explainResult.isCorrect());
+        assertEquals(total, explainResult.getTotal());
+        explainResult.getSqlExplainResults().forEach(sqlExplainResult -> {
+            assertTrue(sqlExplainResult.isParseTrue());
+            assertTrue(sqlExplainResult.isExplainTrue());
+            if (!sqlExplainResult.isParseTrue() || !sqlExplainResult.isExplainTrue()) {
+                throw new RuntimeException(sqlExplainResult.getError());
+            }
+        });
+    }
+
+    private void checkGetStreamGraphFromFile(String path, int total) throws IOException {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalStreamPlanEnvironment();
+        checkGetStreamGraph(statement, total);
+        jobManager.close();
+    }
+
+    private void checkGetBatchStreamGraphFromFile(String path, int total) throws IOException {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalBatchPlanEnvironment();
+        checkGetStreamGraph(statement, total);
+        jobManager.close();
+    }
+
+    private void checkGetStreamGraph(String statement, int total) throws IOException {
+        ObjectNode streamGraph = jobManager.getStreamGraph(statement);
+        assertNotNull(streamGraph);
+        assertNotNull(streamGraph.get("nodes"));
+        assertEquals(total, streamGraph.get("nodes").size());
+    }
+
+    private void checkGetStreamJobPlanJsonFromFile(String path) throws IOException {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalStreamPlanEnvironment();
+        checkGetJobPlanJson(statement);
+        jobManager.close();
+    }
+
+    private void checkGetBatchJobPlanJsonFromFile(String path) throws IOException {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalBatchPlanEnvironment();
+        checkGetJobPlanJson(statement);
+        jobManager.close();
+    }
+
+    private void checkGetJobPlanJson(String statement) throws IOException {
+        String jobPlanJson = jobManager.getJobPlanJson(statement);
+        assertNotNull(jobPlanJson);
+    }
+
+    private void checkStreamExecuteSqlFromFile(String path) throws Exception {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalStreamPlanEnvironment();
+        checkExecuteSql(statement);
+        jobManager.close();
+    }
+
+    private void checkBatchExecuteSqlFromFile(String path) throws Exception {
+        String statement = IOUtils.toString(Resources.getResource(path), StandardCharsets.UTF_8);
+        initLocalBatchPlanEnvironment();
+        checkExecuteSql(statement);
+        jobManager.close();
+    }
+
+    private void checkExecuteSql(String statement) throws Exception {
+        JobResult jobResult = jobManager.executeSql(statement);
+        assertNotNull(jobResult);
+        assertTrue(jobResult.isSuccess());
+    }
+}
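The new suite builds a local plan-mode JobManager (JobManager.buildPlanMode), so no running cluster is needed; the removed org.dinky.core.JobManagerTest exercised a remote YARN session instead. For reference, a remote variant would presumably look like this (the builder fields and setAddress are taken from the removed test; the values are illustrative):

    JobConfig remote = JobConfig.builder()
            .type(GatewayType.YARN_SESSION.getLongValue())
            .jobName("Test")
            .parallelism(1)
            .maxRowNum(100)
            .build();
    remote.setAddress("192.168.123.157:8081"); // cluster address, as in the removed test
    JobManager remoteManager = JobManager.build(remote);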
diff --git a/dinky-core/src/test/java/org/dinky/job/JobTestHandler.java b/dinky-core/src/test/java/org/dinky/job/JobTestHandler.java
new file mode 100644
index 0000000000..6e36d88b73
--- /dev/null
+++ b/dinky-core/src/test/java/org/dinky/job/JobTestHandler.java
@@ -0,0 +1,67 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.dinky.job;
+
+import java.util.List;
+
+public class JobTestHandler implements JobHandler {
+    @Override
+    public boolean init(Job job) {
+        return true;
+    }
+
+    @Override
+    public boolean ready() {
+        return true;
+    }
+
+    @Override
+    public boolean running() {
+        return true;
+    }
+
+    @Override
+    public boolean success() {
+        return true;
+    }
+
+    @Override
+    public boolean failed() {
+        return true;
+    }
+
+    @Override
+    public boolean callback() {
+        return true;
+    }
+
+    @Override
+    public boolean close() {
+        return true;
+    }
+
+    @Override
+    public void persistResultData(List jobIds) {}
+
+    @Override
+    public JobReadHandler getReadHandler() {
+        return null;
+    }
+}
diff --git a/dinky-core/src/test/resources/META-INF/services/org.dinky.job.JobHandler b/dinky-core/src/test/resources/META-INF/services/org.dinky.job.JobHandler
new file mode 100644
index 0000000000..bab0c103f1
--- /dev/null
+++ b/dinky-core/src/test/resources/META-INF/services/org.dinky.job.JobHandler
@@ -0,0 +1 @@
+org.dinky.job.JobTestHandler
\ No newline at end of file
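The META-INF/services entry above registers JobTestHandler through the standard Java SPI, which is presumably how dinky-core discovers JobHandler implementations at runtime; a minimal sketch of that lookup (the production discovery code is not part of this diff):

    // Minimal SPI lookup sketch, assuming JobHandler is resolved via java.util.ServiceLoader.
    ServiceLoader<JobHandler> handlers = ServiceLoader.load(JobHandler.class);
    for (JobHandler handler : handlers) {
        System.out.println(handler.getClass().getName()); // prints org.dinky.job.JobTestHandler in tests
    }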
diff --git a/dinky-core/src/test/resources/flink/sql/single-insert.sql b/dinky-core/src/test/resources/flink/sql/single-insert.sql
new file mode 100644
index 0000000000..3e3044e7e9
--- /dev/null
+++ b/dinky-core/src/test/resources/flink/sql/single-insert.sql
@@ -0,0 +1,26 @@
+CREATE TABLE datagen_source
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'datagen'
+);
+
+CREATE TABLE print_sink
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'print'
+);
+
+INSERT INTO print_sink
+SELECT id,
+       name,
+       sex,
+       age
+from datagen_source;
\ No newline at end of file
diff --git a/dinky-core/src/test/resources/flink/sql/statement-set-batch.sql b/dinky-core/src/test/resources/flink/sql/statement-set-batch.sql
new file mode 100644
index 0000000000..a6ba486966
--- /dev/null
+++ b/dinky-core/src/test/resources/flink/sql/statement-set-batch.sql
@@ -0,0 +1,98 @@
+CREATE TABLE datagen_source
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'datagen',
+    'number-of-rows' = '10'
+);
+
+CREATE TABLE print_sink
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'print'
+);
+
+CREATE TABLE print_sink2
+(
+    sex BIGINT,
+    total BIGINT
+) WITH (
+    'connector' = 'print'
+);
+
+CREATE TABLE print_sink3
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'print'
+);
+
+INSERT INTO print_sink
+SELECT id,
+       name,
+       sex,
+       age
+from datagen_source /*+ OPTIONS('rows-per-second'='1') */;
+
+SELECT id as select_id,
+       name as select_name
+from datagen_source;
+
+SHOW TABLES;
+
+WITH sex_with AS (
+    SELECT id, sex
+    FROM datagen_source
+)
+SELECT sex, COUNT(1) as cnt
+FROM sex_with
+GROUP BY sex;
+
+INSERT INTO print_sink2
+SELECT sex, COUNT(1) as total
+FROM datagen_source
+GROUP BY sex;
+
+CREATE TABLE print_sink4
+WITH (
+    'connector' = 'print'
+)
+AS SELECT id, name, sex, age FROM datagen_source WHERE mod(id, 10) = 4;
+
+REPLACE TABLE print_sink3
+WITH (
+    'connector' = 'print'
+)
+AS SELECT id, name, sex, age FROM datagen_source WHERE mod(id, 10) = 0;
+
+CREATE VIEW t1(s) AS VALUES ('c'), ('a'), ('b'), ('b'), ('c');
+
+CREATE VIEW t2(s) AS VALUES ('d'), ('e'), ('a'), ('b'), ('b');
+
+(SELECT s FROM t1) UNION (SELECT s FROM t2);
+
+ALTER TABLE print_sink3 RENAME TO print_sink5;
+
+DESCRIBE print_sink5;
+
+USE MODULES core;
+
+SHOW TABLES;
+
+SHOW CREATE VIEW t1;
+
+UNLOAD MODULE core;
+
+SET 'table.local-time-zone' = 'Europe/Berlin';
+
+DROP TABLE print_sink5;
\ No newline at end of file
diff --git a/dinky-core/src/test/resources/flink/sql/statement-set-stream.sql b/dinky-core/src/test/resources/flink/sql/statement-set-stream.sql
new file mode 100644
index 0000000000..b3b6067355
--- /dev/null
+++ b/dinky-core/src/test/resources/flink/sql/statement-set-stream.sql
@@ -0,0 +1,97 @@
+CREATE TABLE datagen_source
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'datagen'
+);
+
+CREATE TABLE print_sink
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'print'
+);
+
+CREATE TABLE print_sink2
+(
+    sex BIGINT,
+    total BIGINT
+) WITH (
+    'connector' = 'print'
+);
+
+CREATE TABLE print_sink3
+(
+    id BIGINT,
+    name STRING,
+    sex INT,
+    age INT
+) WITH (
+    'connector' = 'print'
+);
+
+INSERT INTO print_sink
+SELECT id,
+       name,
+       sex,
+       age
+from datagen_source /*+ OPTIONS('rows-per-second'='1') */;
+
+SELECT id as select_id,
+       name as select_name
+from datagen_source;
+
+WITH sex_with AS (
+    SELECT id, sex
+    FROM datagen_source
+)
+SELECT sex, COUNT(1) as cnt
+FROM sex_with
+GROUP BY sex;
+
+INSERT INTO print_sink2
+SELECT sex, COUNT(1) as total
+FROM datagen_source
+GROUP BY sex;
+
+CREATE TABLE print_sink4
+WITH (
+    'connector' = 'print'
+)
+AS SELECT id, name, sex, age FROM datagen_source WHERE mod(id, 10) = 4;
+
+REPLACE TABLE print_sink3
+WITH (
+    'connector' = 'print'
+)
+AS SELECT id, name, sex, age FROM datagen_source WHERE mod(id, 10) = 0;
+
+CREATE VIEW t1(s) AS VALUES ('c'), ('a'), ('b'), ('b'), ('c');
+
+CREATE VIEW t2(s) AS VALUES ('d'), ('e'), ('a'), ('b'), ('b');
+
+(SELECT s FROM t1) UNION (SELECT s FROM t2);
+
+ALTER TABLE print_sink3 RENAME TO print_sink5;
+
+DESCRIBE print_sink5;
+
+EXPLAIN PLAN_ADVICE SELECT id, name, sex, age FROM datagen_source;
+
+USE MODULES core;
+
+SHOW TABLES;
+
+SHOW CREATE VIEW t1;
+
+UNLOAD MODULE core;
+
+SET 'table.local-time-zone' = 'Europe/Berlin';
+
+DROP TABLE print_sink5;
\ No newline at end of file
diff --git a/dinky-function/src/main/java/org/dinky/function/util/UDFUtil.java b/dinky-function/src/main/java/org/dinky/function/util/UDFUtil.java
index 24d86c33d3..fbc9ef19fc 100644
--- a/dinky-function/src/main/java/org/dinky/function/util/UDFUtil.java
+++ b/dinky-function/src/main/java/org/dinky/function/util/UDFUtil.java
@@ -304,6 +304,10 @@ public static boolean isUdfStatement(Pattern pattern, String statement) {
         return !StrUtil.isBlank(statement) && CollUtil.isNotEmpty(ReUtil.findAll(pattern, statement, 0));
     }
 
+    public static boolean isUdfStatement(String statement) {
+        return isUdfStatement(PATTERN, statement);
+    }
+
     public static UDF toUDF(String statement, DinkyClassLoader classLoader) {
         if (isUdfStatement(PATTERN, statement)) {
             List<String> groups = CollUtil.removeEmpty(ReUtil.getAllGroups(PATTERN, statement));
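The new single-argument overload above simply fixes PATTERN as the regex, so callers no longer pass the pattern explicitly:

    // Before: UDFUtil.isUdfStatement(PATTERN, statement)
    // After (this patch); the statement string is illustrative.
    boolean isUdf = UDFUtil.isUdfStatement("create function my_udf as 'com.example.MyUdf'");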
diff --git a/dinky-web/src/pages/DataStudio/BottomContainer/Result/index.tsx b/dinky-web/src/pages/DataStudio/BottomContainer/Result/index.tsx
index 338356afe8..864311ddbb 100644
--- a/dinky-web/src/pages/DataStudio/BottomContainer/Result/index.tsx
+++ b/dinky-web/src/pages/DataStudio/BottomContainer/Result/index.tsx
@@ -200,22 +200,16 @@ const Result = (props: any) => {
 
   const renderFlinkSQLContent = () => {
     return (
-      <>
-        {current?.jobInstanceId && !data.destroyed ? (
-          <>
-
-
-
-
-        ) : undefined}
-
+
+
+
     );
   };
 
   const renderDownloadButton = () => {