diff --git a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkCatalogWithHiveTest.java b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkCatalogWithHiveTest.java
index 370ea35a88ab..5974cd422898 100644
--- a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkCatalogWithHiveTest.java
+++ b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkCatalogWithHiveTest.java
@@ -95,6 +95,9 @@ public void testCreateFormatTable(@TempDir java.nio.file.Path tempDir) {
                         .count())
                 .isGreaterThan(0);
 
+        // TODO: There are some bugs with Spark CSV table options. In Spark 3.x, both reading and
+        // writing use the default delimiter ',' even if we set a specific one. In Spark 4.x,
+        // reading is correct, but writing is still incorrect, so just skip it for now.
         // test csv table
         spark.sql(
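
For context on the TODO above, a minimal sketch of the round-trip that misbehaves. It assumes a SparkSession wired to a Paimon catalog that supports format tables (Hive metastore and related catalog options configured as in SparkCatalogWithHiveTest); the table name t_csv, the warehouse path, and the delimiter value ';' are illustrative, not taken from the patch.

    import org.apache.spark.sql.SparkSession;

    public class CsvDelimiterSketch {
        public static void main(String[] args) {
            // Assumption: the catalog config below is a sketch; the real test also
            // wires a Hive metastore so that format tables are supported.
            SparkSession spark =
                    SparkSession.builder()
                            .master("local[1]")
                            .config(
                                    "spark.sql.catalog.paimon",
                                    "org.apache.paimon.spark.SparkCatalog")
                            .config(
                                    "spark.sql.catalog.paimon.warehouse",
                                    "/tmp/paimon-warehouse")
                            .getOrCreate();

            spark.sql("USE paimon");

            // A CSV format table with a non-default field delimiter, mirroring the
            // "test csv table" step in the test above.
            spark.sql(
                    "CREATE TABLE t_csv (a INT, b STRING) USING csv "
                            + "OPTIONS ('csv.field-delimiter' ';')");

            spark.sql("INSERT INTO t_csv VALUES (1, 'x')");

            // Per the TODO: on Spark 3.x both the write above and this read fall
            // back to the default delimiter ','; on Spark 4.x the read honors ';'
            // but the write still uses ',', so a delimiter round-trip check fails.
            spark.sql("SELECT * FROM t_csv").show();

            spark.stop();
        }
    }

Because only the delimiter round-trip is broken, the patch keeps the CSV table creation in the test and merely skips the affected check, as the TODO notes.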