diff --git a/paimon-spark/paimon-spark-3.2/pom.xml b/paimon-spark/paimon-spark-3.2/pom.xml
index b12f4ba86d00..6f8d06f4c6df 100644
--- a/paimon-spark/paimon-spark-3.2/pom.xml
+++ b/paimon-spark/paimon-spark-3.2/pom.xml
@@ -36,17 +36,17 @@ under the License.
 
     <dependencies>
 
-        <dependency>
-            <groupId>org.apache.paimon</groupId>
-            <artifactId>paimon-bundle</artifactId>
-            <version>${project.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>*</groupId>
-                    <artifactId>*</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
+
+
+
+
+
+
+
+
+
+
+
 
         <dependency>
             <groupId>org.apache.paimon</groupId>
diff --git a/paimon-spark/paimon-spark-common/pom.xml b/paimon-spark/paimon-spark-common/pom.xml
index 36139e283261..803cc6779df2 100644
--- a/paimon-spark/paimon-spark-common/pom.xml
+++ b/paimon-spark/paimon-spark-common/pom.xml
@@ -38,17 +38,17 @@ under the License.
 
     <dependencies>
 
-        <dependency>
-            <groupId>org.apache.paimon</groupId>
-            <artifactId>paimon-bundle</artifactId>
-            <version>${project.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>*</groupId>
-                    <artifactId>*</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
+
+
+
+
+
+
+
+
+
+
+
 
         <dependency>
             <groupId>org.scala-lang</groupId>
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala
index 58295c9acadf..fa8a9d7e1b04 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala
@@ -25,6 +25,7 @@ import org.apache.paimon.table.sink.BatchWriteBuilder
 import org.apache.paimon.types.RowType
 import org.apache.paimon.utils.{InternalRowPartitionComputer, TypeUtils}
 
+import org.apache.spark.internal.Logging
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
 import org.apache.spark.sql.catalyst.util.CharVarcharUtils
@@ -36,7 +37,7 @@ import java.util.{Map => JMap, Objects, UUID}
 
 import scala.collection.JavaConverters._
 
-trait PaimonPartitionManagement extends SupportsAtomicPartitionManagement {
+trait PaimonPartitionManagement extends SupportsAtomicPartitionManagement with Logging {
   self: SparkTable =>
 
   private lazy val partitionRowType: RowType = TypeUtils.project(table.rowType, table.partitionKeys)
@@ -78,6 +79,10 @@ trait PaimonPartitionManagement extends SupportsAtomicPartitionManagement {
         metastoreClient = clientFactory.create()
         toPaimonPartitions(rows).foreach(metastoreClient.deletePartition)
       }
+    } catch {
+      case e: Exception => {
+        logWarning(s"Failed to drop partition in metastore due to $e")
+      }
     } finally {
       commit.close()
       if (metastoreClient != null) {
@@ -146,6 +151,10 @@ trait PaimonPartitionManagement extends SupportsAtomicPartitionManagement {
       val metastoreClient: MetastoreClient = metastoreFactory.create
       try {
         partitions.foreach(metastoreClient.addPartition)
+      } catch {
+        case e: Exception => {
+          logWarning(s"Failed to add partition in metastore due to $e")
+        }
       } finally {
         metastoreClient.close()
       }
diff --git a/paimon-spark/paimon-spark-common/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala b/paimon-spark/paimon-spark-common/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala
index 78148357a143..66438174f055 100644
--- a/paimon-spark/paimon-spark-common/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala
+++ b/paimon-spark/paimon-spark-common/src/test/scala/org/apache/paimon/spark/sql/DDLWithHiveCatalogTestBase.scala
@@ -98,6 +98,34 @@ abstract class DDLWithHiveCatalogTestBase extends PaimonHiveTestBase {
             Row("name=n/pt=cc") :: Nil)
         }
+
+        // disable metastore.partitioned-table
+        withTable("paimon_db.paimon_tbl2") {
+          spark.sql(s"""
+                       |CREATE TABLE paimon_db.paimon_tbl2 (id STRING, name STRING, pt STRING)
+                       |USING PAIMON
+                       |PARTITIONED BY (name, pt)
+                       |TBLPROPERTIES('metastore.partitioned-table' = 'false')
+                       |""".stripMargin)
+          Assertions.assertEquals(
+            getTableLocation("paimon_db.paimon_tbl2"),
+            s"${dBLocation.getCanonicalPath}/paimon_tbl2")
+          spark.sql("insert into paimon_db.paimon_tbl2 select '1', 'n', 'cc'")
+          spark.sql("insert into paimon_db.paimon_tbl2 select '1', 'n1', 'aa'")
+          spark.sql("insert into paimon_db.paimon_tbl2 select '1', 'n2', 'bb'")
+
+          spark.sql("show partitions paimon_db.paimon_tbl2")
+          checkAnswer(
+            spark.sql("show partitions paimon_db.paimon_tbl2"),
+            Row("name=n/pt=cc") :: Row("name=n1/pt=aa") :: Row("name=n2/pt=bb") :: Nil)
+          spark.sql(
+            "alter table paimon_db.paimon_tbl2 drop partition (name='n1', `pt`='aa'), partition (name='n2', `pt`='bb')")
+          spark.sql("show partitions paimon_db.paimon_tbl2")
+          checkAnswer(
+            spark.sql("show partitions paimon_db.paimon_tbl2"),
+            Row("name=n/pt=cc") :: Nil)
+
+        }
       }
     }
   }
@@ -130,6 +158,22 @@ abstract class DDLWithHiveCatalogTestBase extends PaimonHiveTestBase {
           spark.sql("alter table paimon_db.paimon_tbl add partition(name='cc', `pt`='aa') ")
         }
+
+        // enable metastore.partitioned-table
+        withTable("paimon_db.paimon_tbl2") {
+          spark.sql(s"""
+                       |CREATE TABLE paimon_db.paimon_tbl2 (id STRING, name STRING, pt STRING)
+                       |USING PAIMON
+                       |PARTITIONED BY (name, pt)
+                       |TBLPROPERTIES('metastore.partitioned-table' = 'true')
+                       |""".stripMargin)
+          Assertions.assertEquals(
+            getTableLocation("paimon_db.paimon_tbl2"),
+            s"${dBLocation.getCanonicalPath}/paimon_tbl2")
+          spark.sql("insert into paimon_db.paimon_tbl2 select '1', 'n', 'cc'")
+
+          spark.sql("alter table paimon_db.paimon_tbl2 add partition(name='cc', `pt`='aa') ")
+        }
       }
     }
   }