From 3f44d0ca4a96a0932b21bc8ce419fbabdf9fab1b Mon Sep 17 00:00:00 2001 From: Sophie Tan Date: Wed, 12 Jun 2024 15:13:50 -0700 Subject: [PATCH] Remove unused markers --- tests/integ/scala/test_column_suite.py | 40 ------------ .../scala/test_complex_dataframe_suite.py | 3 - .../scala/test_dataframe_aggregate_suite.py | 34 ---------- tests/integ/scala/test_dataframe_copy_into.py | 1 - .../integ/scala/test_dataframe_join_suite.py | 32 ---------- .../integ/scala/test_dataframe_range_suite.py | 7 -- .../scala/test_dataframe_reader_suite.py | 10 --- .../test_dataframe_set_operations_suite.py | 19 ------ tests/integ/scala/test_dataframe_suite.py | 64 ------------------- tests/integ/scala/test_datatype_suite.py | 1 - .../integ/scala/test_file_operation_suite.py | 17 ----- tests/integ/scala/test_function_suite.py | 44 ------------- .../integ/scala/test_large_dataframe_suite.py | 1 - tests/integ/scala/test_session_suite.py | 11 +--- tests/integ/scala/test_table_suite.py | 7 -- tests/integ/scala/test_udf_suite.py | 25 +------- .../scala/test_update_delete_merge_suite.py | 22 ------- tests/integ/scala/test_view_suite.py | 4 -- tests/integ/scala/test_window_frame_suite.py | 12 ---- tests/integ/scala/test_window_spec_suite.py | 8 --- tests/integ/test_column.py | 13 ---- tests/integ/test_column_names.py | 11 ---- tests/integ/test_dataframe.py | 50 --------------- tests/integ/test_datatypes.py | 18 ------ tests/integ/test_df_aggregate.py | 16 ----- tests/integ/test_df_sort.py | 2 - tests/integ/test_df_to_pandas.py | 4 -- tests/integ/test_function.py | 22 ------- tests/integ/test_packaging.py | 7 -- tests/integ/test_session.py | 17 ----- tests/integ/test_stored_procedure.py | 19 ------ tests/integ/test_udf.py | 18 ------ tests/mock/test_filter.py | 7 -- tests/mock/test_functions.py | 13 ---- tests/mock/test_sort.py | 4 -- tests/mock/test_stage_registry.py | 4 -- tests/mock/test_udf.py | 3 - tests/mock/test_union.py | 3 - tox.ini | 2 - 39 files changed, 4 insertions(+), 591 
deletions(-) diff --git a/tests/integ/scala/test_column_suite.py b/tests/integ/scala/test_column_suite.py index 829e55d4d5f..3268958d2a8 100644 --- a/tests/integ/scala/test_column_suite.py +++ b/tests/integ/scala/test_column_suite.py @@ -37,7 +37,6 @@ from tests.utils import IS_IN_STORED_PROC, TestData, Utils -@pytest.mark.localtest def test_column_names_with_space(session): c1 = '"name with space"' c2 = '"name.with.dot"' @@ -51,7 +50,6 @@ def test_column_names_with_space(session): assert df.select(df[c2]).collect() == [Row("a")] -@pytest.mark.localtest def test_column_alias_and_case_insensitive_name(session): df = session.create_dataframe([1, 2]).to_df(["a"]) assert df.select(df["a"].as_("b")).schema.fields[0].name == "B" @@ -59,7 +57,6 @@ def test_column_alias_and_case_insensitive_name(session): assert df.select(df["a"].name("b")).schema.fields[0].name == "B" -@pytest.mark.localtest def test_column_alias_and_case_sensitive_name(session): df = session.create_dataframe([1, 2]).to_df(["a"]) assert df.select(df["a"].as_('"b"')).schema.fields[0].name == '"b"' @@ -67,7 +64,6 @@ def test_column_alias_and_case_sensitive_name(session): assert df.select(df["a"].name('"b"')).schema.fields[0].name == '"b"' -@pytest.mark.localtest def test_unary_operator(session): test_data1 = TestData.test_data1(session) # unary minus @@ -79,7 +75,6 @@ def test_unary_operator(session): ] -@pytest.mark.localtest def test_alias(session): test_data1 = TestData.test_data1(session) assert test_data1.select(test_data1["NUM"]).schema.fields[0].name == "NUM" @@ -97,7 +92,6 @@ def test_alias(session): ) -@pytest.mark.localtest def test_equal_and_not_equal(session): test_data1 = TestData.test_data1(session) assert test_data1.where(test_data1["BOOL"] == True).collect() == [ # noqa: E712 @@ -120,7 +114,6 @@ def test_equal_and_not_equal(session): ] -@pytest.mark.localtest def test_gt_and_lt(session): test_data1 = TestData.test_data1(session) assert test_data1.where(test_data1["NUM"] > 1).collect() == 
[Row(2, False, "b")] @@ -140,7 +133,6 @@ def test_gt_and_lt(session): ).collect() == [Row(datetime.datetime(1583, 1, 1, 23, 59, 59, 567890))] -@pytest.mark.localtest def test_leq_and_geq(session): test_data1 = TestData.test_data1(session) assert test_data1.where(test_data1["NUM"] >= 2).collect() == [Row(2, False, "b")] @@ -159,7 +151,6 @@ def test_leq_and_geq(session): ] -@pytest.mark.localtest def test_null_safe_operators(session): df = session.create_dataframe([[None, 1], [2, 2], [None, None]], schema=["a", "b"]) assert df.select(df["A"].equal_null(df["B"])).collect() == [ @@ -169,7 +160,6 @@ def test_null_safe_operators(session): ] -@pytest.mark.localtest def test_nan_and_null(session): df = session.create_dataframe( [[1.1, 1], [None, 2], [math.nan, 3]], schema=["a", "b"] @@ -186,7 +176,6 @@ def test_nan_and_null(session): assert res_row2[1] == 3 -@pytest.mark.localtest def test_and_or(session): df = session.create_dataframe( [[True, True], [True, False], [False, True], [False, False]], schema=["a", "b"] @@ -199,7 +188,6 @@ def test_and_or(session): ] -@pytest.mark.localtest def test_add_subtract_multiply_divide_mod_pow(session): df = session.create_dataframe([[11, 13]], schema=["a", "b"]) assert df.select(df["A"] + df["B"]).collect() == [Row(24)] @@ -224,7 +212,6 @@ def test_add_subtract_multiply_divide_mod_pow(session): assert res[0][0].to_eng_string() == "0.153846" -@pytest.mark.localtest def test_cast(session): test_data1 = TestData.test_data1(session) sc = test_data1.select(test_data1["NUM"].cast(StringType())).schema @@ -234,7 +221,6 @@ def test_cast(session): assert not sc.fields[0].nullable -@pytest.mark.localtest def test_order(session): null_data1 = TestData.null_data1(session) assert null_data1.sort(null_data1["A"].asc()).collect() == [ @@ -281,7 +267,6 @@ def test_order(session): ] -@pytest.mark.localtest def test_bitwise_operator(session): df = session.create_dataframe([[1, 2]], schema=["a", "b"]) assert df.select(df["A"].bitand(df["B"])).collect() 
== [Row(0)] @@ -289,7 +274,6 @@ def test_bitwise_operator(session): assert df.select(df["A"].bitxor(df["B"])).collect() == [Row(3)] -@pytest.mark.localtest def test_withcolumn_with_special_column_names(session): # Ensure that One and "One" are different column names Utils.check_answer( @@ -332,7 +316,6 @@ def test_withcolumn_with_special_column_names(session): ) -@pytest.mark.localtest def test_toDF_with_special_column_names(session): assert ( session.create_dataframe([[1]]).to_df(["ONE"]).schema @@ -360,7 +343,6 @@ def test_toDF_with_special_column_names(session): ) -@pytest.mark.localtest def test_column_resolution_with_different_kins_of_names(session): df = session.create_dataframe([[1]]).to_df(["One"]) assert df.select(df["one"]).collect() == [Row(1)] @@ -385,7 +367,6 @@ def test_column_resolution_with_different_kins_of_names(session): df.col('"ONE ONE"') -@pytest.mark.localtest def test_drop_columns_by_string(session): df = session.create_dataframe([[1, 2]]).to_df(["One", '"One"']) assert df.drop("one").schema.fields[0].name == '"One"' @@ -401,7 +382,6 @@ def test_drop_columns_by_string(session): assert "Cannot drop all columns" in str(ex_info) -@pytest.mark.localtest def test_drop_columns_by_column(session): df = session.create_dataframe([[1, 2]]).to_df(["One", '"One"']) assert df.drop(col("one")).schema.fields[0].name == '"One"' @@ -459,7 +439,6 @@ def test_fully_qualified_column_name(session): session._run_query(f"drop function if exists {schema}.{udf_name}(integer)") -@pytest.mark.localtest def test_column_names_with_quotes(session): df = session.create_dataframe([[1, 2, 3]]).to_df('col"', '"col"', '"""col"') assert df.select(col('col"')).collect() == [Row(1)] @@ -478,7 +457,6 @@ def test_column_names_with_quotes(session): assert "Invalid identifier" in str(ex_info) -@pytest.mark.localtest def test_column_constructors_col(session): df = session.create_dataframe([[1, 2, 3]]).to_df("col", '"col"', "col .") assert df.select(col("col")).collect() == [Row(1)] 
@@ -499,7 +477,6 @@ def test_column_constructors_col(session): assert "invalid identifier" in str(ex_info) -@pytest.mark.localtest def test_column_constructors_select(session): df = session.create_dataframe([[1, 2, 3]]).to_df("col", '"col"', "col .") assert df.select("col").collect() == [Row(1)] @@ -548,7 +525,6 @@ def test_sql_expr_column(session): assert "syntax error" in str(ex_info) -@pytest.mark.localtest def test_errors_for_aliased_columns(session, local_testing_mode): df = session.create_dataframe([[1]]).to_df("c") # TODO: align exc experience between local testing and snowflake @@ -571,7 +547,6 @@ def test_errors_for_aliased_columns(session, local_testing_mode): assert "invalid identifier" in str(ex_info) -@pytest.mark.localtest def test_like(session): assert TestData.string4(session).where(col("A").like(lit("%p%"))).collect() == [ Row("apple"), @@ -587,7 +562,6 @@ def test_like(session): assert TestData.string4(session).where(col("A").like("")).collect() == [] -@pytest.mark.localtest def test_subfield(session, local_testing_mode): assert TestData.null_json1(session).select(col("v")["a"]).collect() == [ Row("null"), @@ -638,7 +612,6 @@ def test_subfield(session, local_testing_mode): ).collect() == [Row(None)] -@pytest.mark.localtest def test_regexp(session): assert TestData.string4(session).where(col("a").regexp(lit("ap.le"))).collect() == [ Row("apple") @@ -680,13 +653,11 @@ def test_collate(session, spec): ) -@pytest.mark.localtest def test_get_column_name(session): assert TestData.integer1(session).col("a").getName() == '"A"' assert not (col("col") > 100).getName() -@pytest.mark.localtest def test_when_case(session, local_testing_mode): assert TestData.null_data1(session).select( when(col("a").is_null(), lit(5)) @@ -715,13 +686,11 @@ def test_when_case(session, local_testing_mode): assert "Numeric value 'a' is not recognized" in str(ex_info) -@pytest.mark.localtest def test_lit_contains_single_quote(session): df = session.create_dataframe([[1, "'"], [2, 
"''"]]).to_df(["a", "b"]) assert df.where(col("b") == "'").collect() == [Row(1, "'")] -@pytest.mark.localtest def test_in_expression_1_in_with_constant_value_list(session): df = session.create_dataframe( [[1, "a", 1, 1], [2, "b", 2, 2], [3, "b", 33, 33]] @@ -753,7 +722,6 @@ def test_in_expression_1_in_with_constant_value_list(session): Utils.check_answer([Row(False), Row(False), Row(True)], df4, sort=False) -@pytest.mark.localtest def test_in_expression_2_in_with_subquery(session): df0 = session.create_dataframe([[1], [2], [5]]).to_df(["a"]) df = session.create_dataframe( @@ -777,7 +745,6 @@ def test_in_expression_2_in_with_subquery(session): Utils.check_answer(df4, [Row(False), Row(True), Row(True)]) -@pytest.mark.localtest def test_in_expression_3_with_all_types(session, local_testing_mode): schema = StructType( [ @@ -857,7 +824,6 @@ def test_in_expression_3_with_all_types(session, local_testing_mode): Utils.check_answer(df.filter(col("string").isin(["three"])), []) -@pytest.mark.localtest def test_in_expression_4_negative_test_to_input_column_in_value_list(session): df = session.create_dataframe( [[1, "a", 1, 1], [2, "b", 2, 2], [3, "b", 33, 33]] @@ -891,7 +857,6 @@ def test_in_expression_4_negative_test_to_input_column_in_value_list(session): ) -@pytest.mark.localtest def test_in_expression_5_negative_test_that_sub_query_has_multiple_columns(session): df = session.create_dataframe( [[1, "a", 1, 1], [2, "b", 2, 2], [3, "b", 33, 33]] @@ -903,7 +868,6 @@ def test_in_expression_5_negative_test_that_sub_query_has_multiple_columns(sessi assert "does not match the number of columns" in str(ex_info) -@pytest.mark.localtest def test_in_expression_6_multiple_columns_with_const_values(session): df = session.create_dataframe( [[1, "a", -1, 1], [2, "b", -2, 2], [3, "b", 33, 33]] @@ -930,7 +894,6 @@ def test_in_expression_6_multiple_columns_with_const_values(session): Utils.check_answer(df4, [Row(False), Row(False), Row(True)]) -@pytest.mark.localtest def 
test_in_expression_7_multiple_columns_with_sub_query(session): df0 = session.create_dataframe([[1, "a"], [2, "b"], [3, "c"]]).to_df("a", "b") df = session.create_dataframe( @@ -954,7 +917,6 @@ def test_in_expression_7_multiple_columns_with_sub_query(session): Utils.check_answer(df4, [Row(False), Row(False), Row(True)]) -@pytest.mark.localtest def test_in_expression_8_negative_test_to_input_column_in_value_list(session): df = session.create_dataframe( [[1, "a", 1, 1], [2, "b", 2, 2], [3, "b", 33, 33]] @@ -970,7 +932,6 @@ def test_in_expression_8_negative_test_to_input_column_in_value_list(session): ) -@pytest.mark.localtest def test_in_expression_9_negative_test_for_the_column_count_doesnt_match_the_value_list( session, ): @@ -989,7 +950,6 @@ def test_in_expression_9_negative_test_for_the_column_count_doesnt_match_the_val assert "does not match the number of columns" in str(ex_info) -@pytest.mark.localtest def test_in_expression_with_multiple_queries(session): from snowflake.snowpark._internal.analyzer import analyzer diff --git a/tests/integ/scala/test_complex_dataframe_suite.py b/tests/integ/scala/test_complex_dataframe_suite.py index 9220ab10ddc..cd63b99bf29 100644 --- a/tests/integ/scala/test_complex_dataframe_suite.py +++ b/tests/integ/scala/test_complex_dataframe_suite.py @@ -17,7 +17,6 @@ from tests.utils import IS_IN_STORED_PROC_LOCALFS, TestFiles, Utils -@pytest.mark.localtest def test_combination_of_multiple_operators(session): df1 = session.create_dataframe([1, 2]).to_df("a") df2 = session.create_dataframe([[i, f"test{i}"] for i in [1, 2]]).to_df("a", "b") @@ -48,7 +47,6 @@ def test_combination_of_multiple_operators(session): ] -@pytest.mark.localtest def test_combination_of_multiple_operators_with_filters(session): df1 = session.create_dataframe([i for i in range(1, 11)]).to_df("a") df2 = session.create_dataframe([[i, f"test{i}"] for i in range(1, 11)]).to_df( @@ -80,7 +78,6 @@ def test_combination_of_multiple_operators_with_filters(session): assert 
df.collect() == [Row(i, f"test{i}") for i in range(1, 11)] -@pytest.mark.localtest def test_join_on_top_of_unions(session): df1 = session.create_dataframe([i for i in range(1, 6)]).to_df("a") df2 = session.create_dataframe([i for i in range(6, 11)]).to_df("a") diff --git a/tests/integ/scala/test_dataframe_aggregate_suite.py b/tests/integ/scala/test_dataframe_aggregate_suite.py index af3ca9a1665..ecf28b770cb 100644 --- a/tests/integ/scala/test_dataframe_aggregate_suite.py +++ b/tests/integ/scala/test_dataframe_aggregate_suite.py @@ -47,7 +47,6 @@ from tests.utils import IS_IN_STORED_PROC, TestData, Utils -@pytest.mark.localtest def test_pivot(session): Utils.check_answer( TestData.monthly_sales(session) @@ -69,7 +68,6 @@ def test_pivot(session): ) -@pytest.mark.localtest @pytest.mark.parametrize( "func,expected", [ @@ -189,7 +187,6 @@ def test_group_by_pivot(session): ).agg([sum(col("amount")), avg(col("amount"))]) -@pytest.mark.localtest def test_group_by_pivot_dynamic_any(session, caplog): Utils.check_answer( TestData.monthly_sales_with_team(session) @@ -255,7 +252,6 @@ def test_group_by_pivot_dynamic_subquery(session): ) -@pytest.mark.localtest def test_join_on_pivot(session): df1 = ( TestData.monthly_sales(session) @@ -276,7 +272,6 @@ def test_join_on_pivot(session): ) -@pytest.mark.localtest def test_pivot_on_join(session): df = session.create_dataframe([[1, "One"], [2, "Two"]]).to_df("empid", "name") @@ -315,7 +310,6 @@ def test_pivot_dynamic_any_with_temp_table_inlined_data(session, local_testing_m assert pivot_op_df.count() == 1 -@pytest.mark.localtest def test_pivot_dynamic_any(session): Utils.check_answer( TestData.monthly_sales(session) @@ -400,7 +394,6 @@ def test_pivot_dynamic_subquery_with_bad_subquery(session): assert "Pivot subquery must select single column" in str(ex_info.value) -@pytest.mark.localtest def test_pivot_default_on_none(session, caplog): class MonthlySales(NamedTuple): empid: int @@ -442,7 +435,6 @@ class MonthlySales(NamedTuple): 
assert PIVOT_DEFAULT_ON_NULL_WARNING in caplog.text -@pytest.mark.localtest def test_rel_grouped_dataframe_agg(session): df = ( session.create_dataframe([[1, "One"], [2, "Two"], [3, "Three"]]) @@ -467,7 +459,6 @@ def test_rel_grouped_dataframe_agg(session): ] -@pytest.mark.localtest def test_group_by(session): result = ( TestData.nurse(session) @@ -604,7 +595,6 @@ def test_group_by_grouping_sets(session): ) -@pytest.mark.localtest def test_rel_grouped_dataframe_max(session): df1 = session.create_dataframe( [("a", 1, 11, "b"), ("b", 2, 22, "c"), ("a", 3, 33, "d"), ("b", 4, 44, "e")] @@ -623,7 +613,6 @@ def test_rel_grouped_dataframe_max(session): assert df1.group_by("key").agg([max("value1"), max("value2")]).collect() == expected -@pytest.mark.localtest def test_rel_grouped_dataframe_avg_mean(session): df1 = session.create_dataframe( [("a", 1, 11, "b"), ("b", 2, 22, "c"), ("a", 3, 33, "d"), ("b", 4, 44, "e")] @@ -652,7 +641,6 @@ def test_rel_grouped_dataframe_avg_mean(session): ) -@pytest.mark.localtest def test_rel_grouped_dataframe_median(session): df1 = session.create_dataframe( [ @@ -697,7 +685,6 @@ def test_rel_grouped_dataframe_median(session): ) -@pytest.mark.localtest def test_builtin_functions(session): df = session.create_dataframe([(1, 11), (2, 12), (1, 13)]).to_df(["a", "b"]) @@ -711,7 +698,6 @@ def test_builtin_functions(session): ] -@pytest.mark.localtest def test_non_empty_arg_functions(session): func_name = "avg" with pytest.raises(ValueError) as ex_info: @@ -754,7 +740,6 @@ def test_non_empty_arg_functions(session): ) -@pytest.mark.localtest def test_null_count(session): assert TestData.test_data3(session).group_by("a").agg( count(col("b")) @@ -782,7 +767,6 @@ def test_null_count(session): ).collect() == [Row(1, 1, 2)] -@pytest.mark.localtest def test_distinct(session): df = session.create_dataframe( [(1, "one", 1.0), (2, "one", 2.0), (2, "two", 1.0)] @@ -807,7 +791,6 @@ def test_distinct(session): assert df.filter(col("i") < 0).distinct().collect() 
== [] -@pytest.mark.localtest def test_distinct_and_joins(session): lhs = session.create_dataframe([(1, "one", 1.0), (2, "one", 2.0)]).to_df( "i", "s", '"i"' @@ -837,7 +820,6 @@ def test_distinct_and_joins(session): assert res == [Row("one", "one")] -@pytest.mark.localtest def test_groupBy(session): assert TestData.test_data2(session).group_by("a").agg(sum(col("b"))).collect() == [ Row(1, 3), @@ -889,7 +871,6 @@ def test_groupBy(session): ] -@pytest.mark.localtest def test_agg_should_be_order_preserving(session): df = ( session.range(2) @@ -906,7 +887,6 @@ def test_agg_should_be_order_preserving(session): assert df.collect() == [Row(0, 0, 1, 0), Row(1, 1, 1, 1)] -@pytest.mark.localtest def test_count(session): assert TestData.test_data2(session).agg( [count(col("a")), sum_distinct(col("a"))] @@ -1051,14 +1031,12 @@ def test_decimal_sum_over_window_should_work(session): assert df.select(avg("a").over()).collect() == [Row(2.0), Row(2.0), Row(2.0)] -@pytest.mark.localtest def test_aggregate_function_in_groupby(session): with pytest.raises(SnowparkSQLException) as ex_info: TestData.test_data4(session).group_by(sum(col('"KEY"'))).count().collect() assert "is not a valid group by expression" in str(ex_info) -@pytest.mark.localtest def test_ints_in_agg_exprs_are_taken_as_groupby_ordinal(session): assert TestData.test_data2(session).group_by(lit(3), lit(4)).agg( [lit(6), lit(7), sum(col("b"))] @@ -1086,7 +1064,6 @@ def test_ints_in_agg_exprs_are_taken_as_groupby_ordinal_sql(session): ).collect() == [Row(3, 4, 9)] -@pytest.mark.localtest def test_distinct_and_unions(session: object) -> object: lhs = session.create_dataframe([(1, "one", 1.0), (2, "one", 2.0)]).to_df( "i", "s", '"i"' @@ -1114,7 +1091,6 @@ def test_distinct_and_unions(session: object) -> object: assert res == [Row("one")] -@pytest.mark.localtest def test_distinct_and_unionall(session): lhs = session.create_dataframe([(1, "one", 1.0), (2, "one", 2.0)]).to_df( "i", "s", '"i"' @@ -1197,17 +1173,14 @@ def 
test_count_if(session): session.sql(f"SELECT COUNT_IF(x) FROM {temp_view_name}").collect() -@pytest.mark.localtest def test_agg_without_groups(session): assert TestData.test_data2(session).agg(sum(col("b"))).collect() == [Row(9)] -@pytest.mark.localtest def test_agg_without_groups_and_functions(session): assert TestData.test_data2(session).agg(lit(1)).collect() == [Row(1)] -@pytest.mark.localtest def test_null_average(session): assert TestData.test_data3(session).agg(avg(col("b"))).collect() == [Row(2.0)] @@ -1220,7 +1193,6 @@ def test_null_average(session): ).collect() == [Row(2.0, 2.0)] -@pytest.mark.localtest def test_zero_average(session): df = session.create_dataframe([[]]).to_df(["a"]) assert df.agg(avg(col("a"))).collect() == [Row(None)] @@ -1230,7 +1202,6 @@ def test_zero_average(session): ] -@pytest.mark.localtest def test_multiple_column_distinct_count(session): df1 = session.create_dataframe( [ @@ -1289,7 +1260,6 @@ def test_multiple_column_distinct_count(session): ).collect() == [Row(2.25)] -@pytest.mark.localtest def test_zero_count(session): empty_table = session.create_dataframe([[]]).to_df(["a"]) assert empty_table.agg([count(col("a")), sum_distinct(col("a"))]).collect() == [ @@ -1308,19 +1278,16 @@ def test_zero_stddev(session): ).collect() == [Row(None, None, None)] -@pytest.mark.localtest def test_zero_sum(session): df = session.create_dataframe([[]]).to_df(["a"]) assert df.agg([sum(col("a"))]).collect() == [Row(None)] -@pytest.mark.localtest def test_zero_sum_distinct(session): df = session.create_dataframe([[]]).to_df(["a"]) assert df.agg([sum_distinct(col("a"))]).collect() == [Row(None)] -@pytest.mark.localtest def test_limit_and_aggregates(session): df = session.create_dataframe([("a", 1), ("b", 2), ("c", 1), ("d", 5)]).to_df( "id", "value" @@ -1331,7 +1298,6 @@ def test_limit_and_aggregates(session): ) -@pytest.mark.localtest def test_listagg(session): df = session.create_dataframe( [ diff --git 
a/tests/integ/scala/test_dataframe_copy_into.py b/tests/integ/scala/test_dataframe_copy_into.py index 4740980b890..90930424d2e 100644 --- a/tests/integ/scala/test_dataframe_copy_into.py +++ b/tests/integ/scala/test_dataframe_copy_into.py @@ -662,7 +662,6 @@ def test_transormation_as_clause_no_effect(session, tmp_stage_name1): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_copy_with_wrong_dataframe(session): with pytest.raises(SnowparkDataframeException) as exec_info: session.table("a_table_name").copy_into_table("a_table_name") diff --git a/tests/integ/scala/test_dataframe_join_suite.py b/tests/integ/scala/test_dataframe_join_suite.py index 8c8066362f5..5f92c7f887c 100644 --- a/tests/integ/scala/test_dataframe_join_suite.py +++ b/tests/integ/scala/test_dataframe_join_suite.py @@ -28,7 +28,6 @@ from tests.utils import Utils -@pytest.mark.localtest def test_join_using(session): df = session.create_dataframe([[i, str(i)] for i in range(1, 4)]).to_df( ["int", "str"] @@ -43,7 +42,6 @@ def test_join_using(session): ] -@pytest.mark.localtest def test_join_using_multiple_columns(session): df = session.create_dataframe([[i, i + 1, str(i)] for i in range(1, 4)]).to_df( ["int", "int2", "str"] @@ -60,7 +58,6 @@ def test_join_using_multiple_columns(session): ] -@pytest.mark.localtest def test_full_outer_join_followed_by_inner_join(session): a = session.create_dataframe([[1, 2], [2, 3]]).to_df(["a", "b"]) b = session.create_dataframe([[2, 5], [3, 4]]).to_df(["a", "c"]) @@ -71,7 +68,6 @@ def test_full_outer_join_followed_by_inner_join(session): assert abc.collect() == [Row(3, None, 4, 1)] -@pytest.mark.localtest def test_limit_with_join(session): df = session.create_dataframe([[1, 1, "1"], [2, 2, "3"]]).to_df( ["int", "int2", "str"] @@ -89,7 +85,6 @@ def test_limit_with_join(session): assert inner.collect() == [Row(1)] -@pytest.mark.localtest def test_default_inner_join(session): df = session.create_dataframe([1, 2]).to_df(["a"]) df2 = 
session.create_dataframe([[i, f"test{i}"] for i in range(1, 3)]).to_df( @@ -106,7 +101,6 @@ def test_default_inner_join(session): ] -@pytest.mark.localtest def test_default_inner_join_using_column(session): df = session.create_dataframe([1, 2]).to_df(["a"]) df2 = session.create_dataframe([[i, f"test{i}"] for i in range(1, 3)]).to_df( @@ -117,7 +111,6 @@ def test_default_inner_join_using_column(session): assert df.join(df2, "a").filter(col("a") > 1).collect() == [Row(2, "test2")] -@pytest.mark.localtest def test_3_way_joins(session): df1 = session.create_dataframe([1, 2]).to_df(["a"]) df2 = session.create_dataframe([[i, f"test{i}"] for i in range(1, 3)]).to_df( @@ -132,7 +125,6 @@ def test_3_way_joins(session): assert res == [Row("test1", 1, "hello1"), Row("test2", 2, "hello2")] -@pytest.mark.localtest def test_default_inner_join_with_join_conditions(session): df1 = session.create_dataframe([[i, f"test{i}"] for i in range(1, 3)]).to_df( ["a", "b"] @@ -148,7 +140,6 @@ def test_default_inner_join_with_join_conditions(session): ] -@pytest.mark.localtest def test_join_with_multiple_conditions(session): df1 = session.create_dataframe([[i, f"test{i}"] for i in range(1, 3)]).to_df( ["a", "b"] @@ -176,7 +167,6 @@ def test_join_with_ambiguous_column_in_condition(session): assert "The reference to the column 'A' is ambiguous." 
in ex_info.value.message -@pytest.mark.localtest def test_join_using_multiple_columns_and_specifying_join_type( session, local_testing_mode ): @@ -240,7 +230,6 @@ def test_join_using_multiple_columns_and_specifying_join_type( ) -@pytest.mark.localtest def test_join_using_conditions_and_specifying_join_type(session): df1 = session.create_dataframe( @@ -258,7 +247,6 @@ def test_join_using_conditions_and_specifying_join_type(session): Utils.check_answer(df1.join(df2, join_cond, "anti"), [Row(3, 4, "3")]) -@pytest.mark.localtest def test_natural_join(session): df = session.create_dataframe([1, 2]).to_df("a") df2 = session.create_dataframe([[i, f"test{i}"] for i in range(1, 3)]).to_df( @@ -267,7 +255,6 @@ def test_natural_join(session): Utils.check_answer(df.natural_join(df2), [Row(1, "test1"), Row(2, "test2")]) -@pytest.mark.localtest def test_natural_outer_join(session): df1 = session.create_dataframe([[1, "1"], [3, "3"]]).to_df("a", "b") df2 = session.create_dataframe([[1, "1"], [4, "4"]]).to_df("a", "c") @@ -283,7 +270,6 @@ def test_natural_outer_join(session): ) -@pytest.mark.localtest def test_cross_join(session): df1 = session.create_dataframe([[1, "1"], [3, "3"]]).to_df(["int", "str"]) df2 = session.create_dataframe([[2, "2"], [4, "4"]]).to_df(["int", "str"]) @@ -533,7 +519,6 @@ def test_asof_join_negative(session): ).collect() -@pytest.mark.localtest def test_join_ambiguous_columns_with_specified_sources( session, ): @@ -591,7 +576,6 @@ def test_join_ambiguous_columns_without_specified_sources(session): ) -@pytest.mark.localtest def test_join_expression_ambiguous_columns( session, ): @@ -638,7 +622,6 @@ def test_semi_join_expression_ambiguous_columns(session): assert "not present" in str(ex_info) -@pytest.mark.localtest def test_semi_join_with_columns_from_LHS( session, ): @@ -700,7 +683,6 @@ def test_semi_join_with_columns_from_LHS( assert sorted(res, key=operator.itemgetter(0)) == [Row(1), Row(2)] -@pytest.mark.localtest @pytest.mark.parametrize( "join_type", 
["inner", "leftouter", "rightouter", "fullouter", "asof"] ) @@ -797,7 +779,6 @@ def test_columns_with_and_without_quotes(session, local_testing_mode): assert "reference to the column 'INTCOL' is ambiguous." in ex_info.value.message -@pytest.mark.localtest def test_aliases_multiple_levels_deep( session, ): @@ -905,7 +886,6 @@ def test_negative_test_for_self_join_with_conditions(session): Utils.drop_table(session, table_name1) -@pytest.mark.localtest def test_clone_can_help_these_self_joins(session): table_name1 = Utils.random_name_for_temp_object(TempObjectType.TABLE) schema = StructType( @@ -942,7 +922,6 @@ def test_clone_can_help_these_self_joins(session): ] -@pytest.mark.localtest def test_natural_cross_joins(session): df1 = session.create_dataframe([[1, 2], [2, 3]], schema=["c1", "c2"]) df2 = df1 # Another reference of "df" @@ -972,7 +951,6 @@ def test_natural_cross_joins(session): ] -@pytest.mark.localtest def test_clone_with_join_dataframe(session): table_name1 = Utils.random_name_for_temp_object(TempObjectType.TABLE) session.create_dataframe([[1, 2], [2, 3]], schema=["c1", "c2"]).write.save_as_table( @@ -994,7 +972,6 @@ def test_clone_with_join_dataframe(session): assert cloned_join_df.collect() == [Row(2, 3, 1, 2)] -@pytest.mark.localtest def test_join_of_join(session): table_name1 = Utils.random_name_for_temp_object(TempObjectType.TABLE) session.create_dataframe([[1, 1], [2, 2]], schema=["c1", "c2"]).write.save_as_table( @@ -1092,7 +1069,6 @@ def test_with_column_on_join(session): ) -@pytest.mark.localtest def test_process_outer_join_results_using_the_non_nullable_columns_in_the_join_output( session, ): @@ -1119,7 +1095,6 @@ def test_process_outer_join_results_using_the_non_nullable_columns_in_the_join_o ) -@pytest.mark.localtest def test_outer_join_conversion(session): df = session.create_dataframe([(1, 2, "1"), (3, 4, "3")]).to_df( ["int", "int2", "str"] @@ -1161,7 +1136,6 @@ def test_outer_join_conversion(session): assert left_join_2_inner == [Row(1, 2, 
"1", 1, 3, "1")] -@pytest.mark.localtest def test_dont_throw_analysis_exception_in_check_cartesian( session, ): @@ -1176,7 +1150,6 @@ def test_dont_throw_analysis_exception_in_check_cartesian( dfOne.join(dfTwo, col("a") == col("b"), "left").collect() -@pytest.mark.localtest def test_name_alias_on_multiple_join(session): table_trips = Utils.random_name_for_temp_object(TempObjectType.TABLE) table_stations = Utils.random_name_for_temp_object(TempObjectType.TABLE) @@ -1299,7 +1272,6 @@ def test_report_error_when_refer_common_col(session): assert "The reference to the column 'C' is ambiguous." in ex_info.value.message -@pytest.mark.localtest def test_select_all_on_join_result(session): df_left = session.create_dataframe([[1, 2]]).to_df("a", "b") df_right = session.create_dataframe([[3, 4]]).to_df("c", "d") @@ -1345,7 +1317,6 @@ def test_select_all_on_join_result(session): ) -@pytest.mark.localtest def test_select_left_right_on_join_result(session): df_left = session.create_dataframe([[1, 2]]).to_df("a", "b") df_right = session.create_dataframe([[3, 4]]).to_df("c", "d") @@ -1372,7 +1343,6 @@ def test_select_left_right_on_join_result(session): ) -@pytest.mark.localtest def test_select_left_right_combination_on_join_result(session): df_left = session.create_dataframe([[1, 2]]).to_df("a", "b") df_right = session.create_dataframe([[3, 4]]).to_df("c", "d") @@ -1430,7 +1400,6 @@ def test_select_left_right_combination_on_join_result(session): ) -@pytest.mark.localtest def test_select_columns_on_join_result_with_conflict_name( session, ): @@ -1496,7 +1465,6 @@ def test_nested_join_diamond_shape_error( df5.collect() -@pytest.mark.localtest def test_nested_join_diamond_shape_workaround(session): df1 = session.create_dataframe([[1]], schema=["a"]) df2 = session.create_dataframe([[1]], schema=["a"]) diff --git a/tests/integ/scala/test_dataframe_range_suite.py b/tests/integ/scala/test_dataframe_range_suite.py index 074f5adda95..5b535cb32ac 100644 --- 
a/tests/integ/scala/test_dataframe_range_suite.py +++ b/tests/integ/scala/test_dataframe_range_suite.py @@ -13,21 +13,18 @@ from tests.integ.test_packaging import is_pandas_and_numpy_available -@pytest.mark.localtest def test_range(session): assert session.range(5).collect() == [Row(i) for i in range(5)] assert session.range(3, 5).collect() == [Row(i) for i in range(3, 5)] assert session.range(3, 10, 2).collect() == [Row(i) for i in range(3, 10, 2)] -@pytest.mark.localtest def test_negative_test(session): with pytest.raises(ValueError) as ex_info: session.range(-3, 5, 0) assert "The step for range() cannot be 0." in str(ex_info) -@pytest.mark.localtest def test_empty_result_and_negative_start_end_step(session): assert session.range(3, 5, -1).collect() == [] assert session.range(-3, -5, 1).collect() == [] @@ -36,7 +33,6 @@ def test_empty_result_and_negative_start_end_step(session): assert session.range(10, 3, -3).collect() == [Row(i) for i in range(10, 3, -3)] -@pytest.mark.localtest def test_range_api(session): res3 = session.range(1, -2).select("id") assert res3.count() == 0 @@ -66,7 +62,6 @@ def test_range_api(session): assert res16.count() == 500 -@pytest.mark.localtest def test_range_with_randomized_parameters(session): MAX_NUM_STEPS = 10 * 1000 MAX_VALUE = 2**31 - 1 @@ -101,7 +96,6 @@ def random_bound(): assert res[0][1] == expected_sum -@pytest.mark.localtest def test_range_with_max_and_min(session): MAX_VALUE = 0x7FFFFFFFFFFFFFFF MIN_VALUE = -0x8000000000000000 @@ -112,7 +106,6 @@ def test_range_with_max_and_min(session): @pytest.mark.skipif(not is_pandas_and_numpy_available, reason="requires numpy") -@pytest.mark.localtest def test_range_with_large_range_and_step(session): import numpy as np diff --git a/tests/integ/scala/test_dataframe_reader_suite.py b/tests/integ/scala/test_dataframe_reader_suite.py index ce0d0793272..9007056d179 100644 --- a/tests/integ/scala/test_dataframe_reader_suite.py +++ b/tests/integ/scala/test_dataframe_reader_suite.py @@ -242,7 
+242,6 @@ def setup(session, resources_path, local_testing_mode): session.sql(f"DROP STAGE IF EXISTS {tmp_stage_only_json_file}").collect() -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_read_csv(session, mode): reader = get_reader(session, mode) @@ -419,7 +418,6 @@ def mock_run_query(*args, **kwargs): assert "Could not infer csv schema due to exception:" in caplog.text -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_read_csv_incorrect_schema(session, mode): reader = get_reader(session, mode) @@ -493,7 +491,6 @@ def test_save_as_table_do_not_change_col_name(session): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_read_csv_with_more_operations(session): test_file_on_stage = f"@{tmp_stage_name1}/{test_file_csv}" df1 = session.read.schema(user_schema).csv(test_file_on_stage).filter(col("a") < 2) @@ -541,7 +538,6 @@ def test_read_csv_with_more_operations(session): ] -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_read_csv_with_format_type_options(session, mode, local_testing_mode): test_file_colon = f"@{tmp_stage_name1}/{test_file_csv_colon}" @@ -604,7 +600,6 @@ def test_read_csv_with_format_type_options(session, mode, local_testing_mode): ] -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_to_read_files_from_stage(session, resources_path, mode, local_testing_mode): data_files_stage = Utils.random_stage_name() @@ -639,7 +634,6 @@ def test_to_read_files_from_stage(session, resources_path, mode, local_testing_m session.sql(f"DROP STAGE IF EXISTS {data_files_stage}") -@pytest.mark.localtest @pytest.mark.xfail(reason="SNOW-575700 flaky test", strict=False) @pytest.mark.parametrize("mode", ["select", "copy"]) def test_for_all_csv_compression_keywords(session, temp_schema, mode): @@ -676,7 +670,6 @@ def test_for_all_csv_compression_keywords(session, temp_schema, mode): session.sql(f"drop file format 
{format_name}") -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_read_csv_with_special_chars_in_format_type_options(session, mode): schema1 = StructType( @@ -758,7 +751,6 @@ def test_read_csv_with_special_chars_in_format_type_options(session, mode): assert res == [Row('"1.234"', '"09:10:11"'), Row('"2.5"', "12:34:56")] -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_read_csv_with_quotes_containing_delimiter(session, mode): schema1 = StructType( @@ -891,7 +883,6 @@ def test_read_metadata_column_from_stage(session, file_format): ) -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_read_json_with_no_schema(session, mode): json_path = f"@{tmp_stage_name1}/{test_file_json}" @@ -948,7 +939,6 @@ def test_read_json_with_no_schema(session, mode): get_reader(session, mode).schema(user_schema).json(json_path) -@pytest.mark.localtest @pytest.mark.parametrize("mode", ["select", "copy"]) def test_read_json_with_infer_schema(session, mode): json_path = f"@{tmp_stage_name1}/{test_file_json}" diff --git a/tests/integ/scala/test_dataframe_set_operations_suite.py b/tests/integ/scala/test_dataframe_set_operations_suite.py index 4cdb7c0ddb5..6ec260ea796 100644 --- a/tests/integ/scala/test_dataframe_set_operations_suite.py +++ b/tests/integ/scala/test_dataframe_set_operations_suite.py @@ -16,7 +16,6 @@ from tests.utils import TestData, Utils -@pytest.mark.localtest def test_union_with_filters(session): """Tests union queries with a filter added""" @@ -50,7 +49,6 @@ def check(new_col: Column, cfilter: Column, result: List[Row]): check(lit(2).cast(IntegerType()), col("c") != 2, list()) -@pytest.mark.localtest def test_union_all_with_filters(session): """Tests union queries with a filter added""" @@ -84,7 +82,6 @@ def check(new_col: Column, cfilter: Column, result: List[Row]): check(lit(2).cast(IntegerType()), col("c") != 2, list()) -@pytest.mark.localtest def test_except(session): 
lower_case_data = TestData.lower_case_data(session) upper_case_data = TestData.upper_case_data(session) @@ -125,7 +122,6 @@ def test_except(session): Utils.check_answer(all_nulls.filter(lit(0) == 1).except_(all_nulls), []) -@pytest.mark.localtest def test_except_between_two_projects_without_references_used_in_filter(session): df = session.create_dataframe(((1, 2, 4), (1, 3, 5), (2, 2, 3), (2, 4, 5))).to_df( "a", "b", "c" @@ -136,7 +132,6 @@ def test_except_between_two_projects_without_references_used_in_filter(session): Utils.check_answer(df1.select("b").except_(df2.select("c")), Row(2)) -@pytest.mark.localtest def test_union_unionall_unionbyname_unionallbyname_in_one_case(session): df1 = session.create_dataframe([(1, 2, 3)]).to_df("a", "b", "c") df2 = session.create_dataframe([(3, 1, 2)]).to_df("c", "a", "b") @@ -153,7 +148,6 @@ def test_union_unionall_unionbyname_unionallbyname_in_one_case(session): Utils.check_answer(df1.union_all_by_name(df3), [Row(1, 2, 3), Row(3, 1, 2)]) -@pytest.mark.localtest def test_nondeterministic_expressions_should_not_be_pushed_down(session): df1 = session.create_dataframe([(i,) for i in range(1, 21)]).to_df("i") df2 = session.create_dataframe([(i,) for i in range(1, 11)]).to_df("i") @@ -171,7 +165,6 @@ def test_nondeterministic_expressions_should_not_be_pushed_down(session): Utils.check_answer(except_.collect(), except_.collect()) -@pytest.mark.localtest def test_union_all(session): td4 = TestData.test_data4(session) union_df = td4.union(td4).union(td4).union(td4).union(td4) @@ -184,7 +177,6 @@ def test_union_all(session): assert res == [Row(1, 25250)] -@pytest.mark.localtest def test_union_by_name(session): df1 = session.create_dataframe([(1, 2, 3)]).to_df("a", "b", "c") df2 = session.create_dataframe([(3, 1, 2)]).to_df("c", "a", "b") @@ -205,7 +197,6 @@ def test_union_by_name(session): df1.union_by_name(df2) -@pytest.mark.localtest def test_unionall_by_name(session): df1 = session.create_dataframe([(1, 2, 3)]).to_df("a", "b", "c") 
df2 = session.create_dataframe([(3, 1, 2)]).to_df("c", "a", "b") @@ -226,7 +217,6 @@ def test_unionall_by_name(session): df1.union_all_by_name(df2) -@pytest.mark.localtest def test_union_by_quoted_name(session): df1 = session.create_dataframe([(1, 2, 3)]).to_df('"a"', "a", "c") df2 = session.create_dataframe([(3, 1, 2)]).to_df("c", '"a"', "a") @@ -242,7 +232,6 @@ def test_union_by_quoted_name(session): df1.union_by_name(df2) -@pytest.mark.localtest def test_unionall_by_quoted_name(session): df1 = session.create_dataframe([(1, 2, 3)]).to_df('"a"', "a", "c") df2 = session.create_dataframe([(3, 1, 2)]).to_df("c", '"a"', "a") @@ -258,7 +247,6 @@ def test_unionall_by_quoted_name(session): df1.union_by_name(df2) -@pytest.mark.localtest def test_intersect_nullability(session): non_nullable_ints = session.create_dataframe([[1], [3]]).to_df("a") null_ints = TestData.null_ints(session) @@ -292,7 +280,6 @@ def test_intersect_nullability(session): assert all(not i.nullable for i in df4.schema.fields) -@pytest.mark.localtest def test_performing_set_ops_on_non_native_types(session): dates = session.create_dataframe( [ @@ -316,7 +303,6 @@ def test_performing_set_ops_on_non_native_types(session): dates.except_(widen_typed_rows).collect() -@pytest.mark.localtest def test_union_by_name_check_name_duplication(session): c0 = "ab" c1 = "AB" @@ -333,7 +319,6 @@ def test_union_by_name_check_name_duplication(session): df1.union_by_name(df2) -@pytest.mark.localtest def test_unionall_by_name_check_name_duplication(session): c0 = "ab" c1 = "AB" @@ -350,7 +335,6 @@ def test_unionall_by_name_check_name_duplication(session): df1.union_all_by_name(df2) -@pytest.mark.localtest def test_intersect(session): lcd = TestData.lower_case_data(session) res = lcd.intersect(lcd).collect() @@ -378,7 +362,6 @@ def test_intersect(session): assert res == [Row("id", 1), Row("id1", 1), Row("id1", 2)] -@pytest.mark.localtest def test_project_should_not_be_pushed_down_through_intersect_or_except(session): df1 = 
session.create_dataframe([[i] for i in range(1, 101)]).to_df("i") df2 = session.create_dataframe([[i] for i in range(1, 31)]).to_df("i") @@ -387,7 +370,6 @@ def test_project_should_not_be_pushed_down_through_intersect_or_except(session): assert df1.except_(df2).count() == 70 -@pytest.mark.localtest def test_except_nullability(session): non_nullable_ints = session.create_dataframe(((11,), (3,))).to_df(["a"]) for attribute in non_nullable_ints.schema._to_attributes(): @@ -421,7 +403,6 @@ def test_except_distinct_sql_compliance(session): Utils.check_answer(df_left.except_(df_right), [Row(2), Row(4)]) -@pytest.mark.localtest def test_mix_set_operator(session): df1 = session.create_dataframe([1]).to_df("a") df2 = session.create_dataframe([2]).to_df("a") diff --git a/tests/integ/scala/test_dataframe_suite.py b/tests/integ/scala/test_dataframe_suite.py index dc8d1159fb5..2430164669c 100644 --- a/tests/integ/scala/test_dataframe_suite.py +++ b/tests/integ/scala/test_dataframe_suite.py @@ -69,7 +69,6 @@ SAMPLING_DEVIATION = 0.4 -@pytest.mark.localtest def test_null_data_in_tables(session, local_testing_mode): table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) try: @@ -90,7 +89,6 @@ def test_null_data_in_tables(session, local_testing_mode): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_null_data_in_local_relation_with_filters(session): df = session.create_dataframe([[1, None], [2, "NotNull"], [3, None]]).to_df( ["a", "b"] @@ -113,7 +111,6 @@ def test_null_data_in_local_relation_with_filters(session): ] -@pytest.mark.localtest def test_project_null_values(session): """Tests projecting null values onto different columns in a dataframe""" df = session.create_dataframe([1, 2]).to_df("a").with_column("b", lit(None)) @@ -149,7 +146,6 @@ def test_bulk_insert_from_collected_result(session): Utils.drop_table(session, table_name_copied) -@pytest.mark.localtest def test_write_null_data_to_table(session, local_testing_mode): table_name = 
Utils.random_name_for_temp_object(TempObjectType.TABLE) df = session.create_dataframe([(1, None), (2, None), (3, None)]).to_df("a", "b") @@ -161,7 +157,6 @@ def test_write_null_data_to_table(session, local_testing_mode): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_view_should_be_updated(session, local_testing_mode): """Assert views should reflect changes if the underlying data is updated.""" table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) @@ -184,7 +179,6 @@ def test_view_should_be_updated(session, local_testing_mode): Utils.drop_view(session, view_name) -@pytest.mark.localtest def test_create_or_replace_view_with_null_data(session, local_testing_mode): df = session.create_dataframe([[1, None], [2, "NotNull"], [3, None]]).to_df( ["a", "b"] @@ -201,7 +195,6 @@ def test_create_or_replace_view_with_null_data(session, local_testing_mode): Utils.drop_view(session, view_name) -@pytest.mark.localtest def test_adjust_column_width_of_show(session): df = session.create_dataframe([[1, None], [2, "NotNull"]]).to_df("a", "b") # run show(), make sure no error is reported @@ -220,7 +213,6 @@ def test_adjust_column_width_of_show(session): ) -@pytest.mark.localtest def test_show_with_null_data(session): df = session.create_dataframe([[1, None], [2, "NotNull"]]).to_df("a", "b") # run show(), make sure no error is reported @@ -239,7 +231,6 @@ def test_show_with_null_data(session): ) -@pytest.mark.localtest def test_show_multi_lines_row(session): df = session.create_dataframe( [ @@ -303,7 +294,6 @@ def test_show(session): ) -@pytest.mark.localtest def test_cache_result(session): table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) session.create_dataframe([[1], [2]], schema=["num"]).write.save_as_table(table_name) @@ -369,7 +359,6 @@ def test_cache_result_with_show(session): session._run_query(f"drop table {table_name1}") -@pytest.mark.localtest def test_drop_cache_result_try_finally(session): df = 
session.create_dataframe([[1, 2]], schema=["a", "b"]) cached = df.cache_result() @@ -393,7 +382,6 @@ def test_drop_cache_result_try_finally(session): df_after_cached.collect() -@pytest.mark.localtest def test_drop_cache_result_context_manager(session): df = session.create_dataframe([[1, 2]], schema=["a", "b"]) with df.cache_result() as cached: @@ -859,7 +847,6 @@ def test_df_stat_crosstab_max_column_test(session): assert res_4[0]["A"] == 1 and res_4[0]["CAST(1 AS NUMBER(38,0))"] == 1001 -@pytest.mark.localtest def test_select_star(session): double2 = TestData.double2(session) expected = TestData.double2(session).collect() @@ -867,7 +854,6 @@ def test_select_star(session): assert double2.select(double2.col("*")).collect() == expected -@pytest.mark.localtest def test_first(session): assert TestData.integer1(session).first() == Row(1) assert TestData.null_data1(session).first() == Row(None) @@ -887,7 +873,6 @@ def test_first(session): assert sorted(res, key=lambda x: x[0]) == [Row(1), Row(2), Row(3)] -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC_LOCALFS, reason="Large result") def test_sample_with_row_count(session): """Tests sample using n (row count)""" @@ -904,7 +889,6 @@ def test_sample_with_row_count(session): assert len(df.sample(n=row_count + 10).collect()) == row_count -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC_LOCALFS, reason="Large result") def test_sample_with_frac(session): """Tests sample using frac""" @@ -926,7 +910,6 @@ def test_sample_with_frac(session): assert len(df.sample(frac=1.0).collect()) == row_count -@pytest.mark.localtest def test_sample_with_seed(session): row_count = 10000 temp_table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) @@ -972,7 +955,6 @@ def test_sample_with_sampling_method(session): df.drop_table() -@pytest.mark.localtest def test_sample_negative(session): """Tests negative test cases for sample""" row_count = 10000 @@ -997,7 +979,6 @@ def test_sample_negative(session): 
table.sample(frac=0.1, sampling_method="InvalidValue") -@pytest.mark.localtest def test_sample_on_join(session): """Tests running sample on a join statement""" row_count = 10000 @@ -1014,7 +995,6 @@ def test_sample_on_join(session): ) -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC_LOCALFS, reason="Large result") def test_sample_on_union(session): """Tests running sample on union statements""" @@ -1040,7 +1020,6 @@ def test_sample_on_union(session): ) -@pytest.mark.localtest def test_toDf(session): # to_df(*str) with 1 column df1 = session.create_dataframe([1, 2, 3]).to_df("a") @@ -1108,7 +1087,6 @@ def test_toDf(session): assert df6.schema.fields[0].name == "A" and df6.schema.fields[-1].name == "C" -@pytest.mark.localtest def test_toDF_negative_test(session): values = session.create_dataframe([[1, None], [2, "NotNull"], [3, None]]) @@ -1135,7 +1113,6 @@ def test_toDF_negative_test(session): assert "The number of columns doesn't match" in ex_info.value.args[0] -@pytest.mark.localtest def test_sort(session): df = session.create_dataframe( [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3)] @@ -1181,7 +1158,6 @@ def test_sort(session): assert "sort() needs at least one sort expression" in ex_info.value.args[0] -@pytest.mark.localtest def test_select(session): df = session.create_dataframe([(1, "a", 10), (2, "b", 20), (3, "c", 30)]).to_df( ["a", "b", "c"] @@ -1251,7 +1227,6 @@ def test_select_negative_select(session): assert "SQL compilation error" in str(ex_info) -@pytest.mark.localtest def test_drop_and_dropcolumns(session): df = session.create_dataframe([(1, "a", 10), (2, "b", 20), (3, "c", 30)]).to_df( ["a", "b", "c"] @@ -1304,7 +1279,6 @@ def test_drop_and_dropcolumns(session): assert "Cannot drop all column" in str(ex_info) -@pytest.mark.localtest def test_dataframe_agg(session): df = session.create_dataframe([(1, "One"), (2, "Two"), (3, "Three")]).to_df( "empid", "name" @@ -1412,7 +1386,6 @@ def test_rollup(session): ) 
-@pytest.mark.localtest def test_groupby(session): df = session.create_dataframe( [ @@ -1759,7 +1732,6 @@ def test_createDataFrame_with_given_schema(session, local_testing_mode): Utils.check_answer(result, data, sort=False) -@pytest.mark.localtest def test_createDataFrame_with_given_schema_time(session): schema = StructType( [ @@ -2020,7 +1992,6 @@ def test_createDataFrame_with_given_schema_array_map_variant(session): Utils.check_answer(df, expected, sort=False) -@pytest.mark.localtest def test_variant_in_array_and_map(session, local_testing_mode): schema = StructType( [StructField("array", ArrayType(None)), StructField("map", MapType(None, None))] @@ -2030,7 +2001,6 @@ def test_variant_in_array_and_map(session, local_testing_mode): Utils.check_answer(df, [Row('[\n 1,\n "\\"\'"\n]', '{\n "a": "\\"\'"\n}')]) -@pytest.mark.localtest def test_escaped_character(session): df = session.create_dataframe(["'", "\\", "\n"]).to_df("a") res = df.collect() @@ -2041,7 +2011,6 @@ def test_escaped_character(session): IS_IN_STORED_PROC, reason="creating new sessions within stored proc is not supported", ) -@pytest.mark.localtest def test_create_or_replace_temporary_view(session, db_parameters, local_testing_mode): view_name = Utils.random_name_for_temp_object(TempObjectType.VIEW) view_name1 = f'"{view_name}%^11"' @@ -2084,7 +2053,6 @@ def test_create_or_replace_temporary_view(session, db_parameters, local_testing_ assert "does not exist or not authorized" in str(ex_info) -@pytest.mark.localtest def test_createDataFrame_with_schema_inference(session): df1 = session.create_dataframe([1, 2, 3]).to_df("int") Utils.check_answer(df1, [Row(1), Row(2), Row(3)]) @@ -2100,7 +2068,6 @@ def test_createDataFrame_with_schema_inference(session): Utils.check_answer(df2, [Row(True, "a"), Row(False, "b")], False) -@pytest.mark.localtest def test_create_nullable_dataframe_with_schema_inference(session): df = session.create_dataframe([(1, 1, None), (2, 3, True)]).to_df("a", "b", "c") assert ( @@ 
-2111,7 +2078,6 @@ def test_create_nullable_dataframe_with_schema_inference(session): Utils.check_answer(df, [Row(1, 1, None), Row(2, 3, True)]) -@pytest.mark.localtest def test_schema_inference_binary_type(session): df = session.create_dataframe( [ @@ -2126,7 +2092,6 @@ def test_schema_inference_binary_type(session): ) -@pytest.mark.localtest def test_primitive_array(session, local_testing_mode): schema = StructType([StructField("arr", ArrayType(None))]) df = session.create_dataframe([Row([1])], schema) @@ -2163,7 +2128,6 @@ def test_time_date_and_timestamp_test(session): ) -@pytest.mark.localtest def test_quoted_column_names(session, local_testing_mode): normalName = "NORMAL_NAME" lowerCaseName = '"lower_case"' @@ -2270,7 +2234,6 @@ def test_quoted_column_names(session, local_testing_mode): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_column_names_without_surrounding_quote(session, local_testing_mode): normalName = "NORMAL_NAME" lowerCaseName = '"lower_case"' @@ -2322,7 +2285,6 @@ def test_column_names_without_surrounding_quote(session, local_testing_mode): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_negative_test_for_user_input_invalid_quoted_name(session): df = session.create_dataframe([1, 2, 3]).to_df("a") with pytest.raises(SnowparkPlanException) as ex_info: @@ -2330,7 +2292,6 @@ def test_negative_test_for_user_input_invalid_quoted_name(session): assert "Invalid identifier" in str(ex_info) -@pytest.mark.localtest def test_clone_with_union_dataframe(session, local_testing_mode): table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) try: @@ -2354,7 +2315,6 @@ def test_clone_with_union_dataframe(session, local_testing_mode): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_clone_with_unionall_dataframe(session, local_testing_mode): table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) try: @@ -2378,7 +2338,6 @@ def test_clone_with_unionall_dataframe(session, 
local_testing_mode): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_dataframe_show_with_new_line(session): df = session.create_dataframe( ["line1\nline1.1\n", "line2", "\n", "line4", "\n\n", None] @@ -2434,7 +2393,6 @@ def test_dataframe_show_with_new_line(session): ) -@pytest.mark.localtest def test_negative_test_to_input_invalid_table_name_for_saveAsTable(session): df = session.create_dataframe([(1, None), (2, "NotNull"), (3, None)]).to_df( "a", "b" @@ -2444,7 +2402,6 @@ def test_negative_test_to_input_invalid_table_name_for_saveAsTable(session): assert re.compile("The object name .* is invalid.").match(ex_info.value.message) -@pytest.mark.localtest def test_negative_test_to_input_invalid_view_name_for_createOrReplaceView(session): df = session.create_dataframe([[2, "NotNull"]]).to_df(["a", "b"]) with pytest.raises(SnowparkInvalidObjectNameException) as ex_info: @@ -2452,7 +2409,6 @@ def test_negative_test_to_input_invalid_view_name_for_createOrReplaceView(sessio assert re.compile("The object name .* is invalid.").match(ex_info.value.message) -@pytest.mark.localtest def test_toDF_with_array_schema(session): df = session.create_dataframe([[1, "a"]]).to_df("a", "b") schema = df.schema @@ -2461,7 +2417,6 @@ def test_toDF_with_array_schema(session): assert schema.fields[1].name == "B" -@pytest.mark.localtest def test_sort_with_array_arg(session): df = session.create_dataframe([(1, 1, 1), (2, 0, 4), (1, 2, 3)]).to_df( "col1", "col2", "col3" @@ -2470,33 +2425,28 @@ def test_sort_with_array_arg(session): Utils.check_answer(df_sorted, [Row(1, 2, 3), Row(1, 1, 1), Row(2, 0, 4)], False) -@pytest.mark.localtest def test_select_with_array_args(session): df = session.create_dataframe([[1, 2]]).to_df("col1", "col2") df_selected = df.select(df.col("col1"), lit("abc"), df.col("col1") + df.col("col2")) Utils.check_answer(df_selected, Row(1, "abc", 3)) -@pytest.mark.localtest def test_select_string_with_array_args(session): df = 
session.create_dataframe([[1, 2, 3]]).to_df("col1", "col2", "col3") df_selected = df.select(["col1", "col2"]) Utils.check_answer(df_selected, [Row(1, 2)]) -@pytest.mark.localtest def test_drop_string_with_array_args(session): df = session.create_dataframe([[1, 2, 3]]).to_df("col1", "col2", "col3") Utils.check_answer(df.drop(["col3"]), [Row(1, 2)]) -@pytest.mark.localtest def test_drop_with_array_args(session): df = session.create_dataframe([[1, 2, 3]]).to_df("col1", "col2", "col3") Utils.check_answer(df.drop([df["col3"]]), [Row(1, 2)]) -@pytest.mark.localtest def test_agg_with_array_args(session): df = session.create_dataframe([[1, 2], [4, 5]]).to_df("col1", "col2") Utils.check_answer(df.agg([max(col("col1")), mean(col("col2"))]), [Row(4, 3.5)]) @@ -2576,7 +2526,6 @@ def test_rollup_string_with_array_args(session): ) -@pytest.mark.localtest def test_groupby_with_array_args(session): df = session.create_dataframe( [ @@ -2603,7 +2552,6 @@ def test_groupby_with_array_args(session): ) -@pytest.mark.localtest def test_groupby_string_with_array_args(session): df = session.create_dataframe( [ @@ -2630,7 +2578,6 @@ def test_groupby_string_with_array_args(session): ) -@pytest.mark.localtest def test_rename_basic(session): df = session.create_dataframe([[1, 2]], schema=["a", "b"]) df2 = df.with_column_renamed("b", "b1") @@ -2707,7 +2654,6 @@ def test_rename_to_df_and_joined_dataframe(session): Utils.check_answer(df5, [Row(1, 2, 1, 2)]) -@pytest.mark.localtest def test_rename_negative_test(session, local_testing_mode): df = session.create_dataframe([[1, 2]], schema=["a", "b"]) @@ -2791,7 +2737,6 @@ def test_with_columns_keep_order(session): ) -@pytest.mark.localtest def test_with_columns_input_doesnt_match_each_other(session): df = session.create_dataframe([Row(1, 2, 3)]).to_df(["a", "b", "c"]) with pytest.raises(ValueError) as ex_info: @@ -2802,7 +2747,6 @@ def test_with_columns_input_doesnt_match_each_other(session): ) -@pytest.mark.localtest def 
test_with_columns_replace_existing(session): df = session.create_dataframe([Row(1, 2, 3)]).to_df(["a", "b", "c"]) replaced = df.with_columns(["b", "d"], [lit(5), lit(6)]) @@ -2823,7 +2767,6 @@ def test_with_columns_replace_existing(session): ) -@pytest.mark.localtest def test_drop_duplicates(session): df = session.create_dataframe( [[1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4]], @@ -2869,7 +2812,6 @@ def test_drop_duplicates(session): assert "The DataFrame does not contain the column named e." in str(exec_info) -@pytest.mark.localtest def test_consecutively_drop_duplicates(session): df = session.create_dataframe( [[1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4]], @@ -2887,7 +2829,6 @@ def test_consecutively_drop_duplicates(session): assert row1 in [Row(1, 1, 1, 1), Row(1, 1, 1, 2), Row(1, 1, 2, 3), Row(1, 2, 3, 4)] -@pytest.mark.local def test_dropna(session, local_testing_mode): Utils.check_answer( TestData.double3(session, local_testing_mode).na.drop(thresh=1, subset=["a"]), @@ -2934,7 +2875,6 @@ def test_dropna(session, local_testing_mode): assert "how ('bad') should be 'any' or 'all'" in str(exc_info) -@pytest.mark.localtest def test_fillna(session, local_testing_mode): Utils.check_answer( TestData.null_data3(session, local_testing_mode).na.fill( @@ -2985,7 +2925,6 @@ def test_fillna(session, local_testing_mode): assert "The DataFrame does not contain the column named" in str(ex_info) -@pytest.mark.localtest def test_replace(session, local_testing_mode): res = ( TestData.null_data3(session, local_testing_mode) @@ -3095,7 +3034,6 @@ def test_explain(session): assert "Logical Execution Plan" not in explain_string -@pytest.mark.localtest def test_to_local_iterator(session): df = session.create_dataframe([1, 2, 3]).toDF("a") iterator = df.to_local_iterator() @@ -3151,7 +3089,6 @@ def check_random_split_result(weights, seed=None): check_random_split_result([0.11111, 0.6666, 1.3]) -@pytest.mark.localtest def 
test_random_split_negative(session): df1 = session.range(10) @@ -3168,7 +3105,6 @@ def test_random_split_negative(session): assert "weights must be positive numbers" in str(ex_info) -@pytest.mark.localtest def test_to_df(session): df = session.create_dataframe( [[1], [3], [5], [7], [9]], diff --git a/tests/integ/scala/test_datatype_suite.py b/tests/integ/scala/test_datatype_suite.py index d0d45185487..022d68e40ff 100644 --- a/tests/integ/scala/test_datatype_suite.py +++ b/tests/integ/scala/test_datatype_suite.py @@ -209,7 +209,6 @@ def test_verify_datatypes_reference(session): Utils.is_schema_same(df.schema, expected_schema, case_sensitive=False) -@pytest.mark.localtest def test_verify_datatypes_reference2(session): d1 = DecimalType(2, 1) d2 = DecimalType(2, 1) diff --git a/tests/integ/scala/test_file_operation_suite.py b/tests/integ/scala/test_file_operation_suite.py index c14cc5fee29..2dc424dde09 100644 --- a/tests/integ/scala/test_file_operation_suite.py +++ b/tests/integ/scala/test_file_operation_suite.py @@ -89,7 +89,6 @@ def temp_stage(session, resources_path, local_testing_mode): Utils.drop_stage(session, tmp_stage_name) -@pytest.mark.localtest def test_put_with_one_file( session, temp_stage, path1, path2, path3, local_testing_mode ): @@ -184,7 +183,6 @@ def test_put_with_one_file( assert third_result.message == "" -@pytest.mark.localtest def test_put_with_one_file_twice(session, temp_stage, path1, local_testing_mode): stage_prefix = f"prefix_{random_alphanumeric_name()}" stage_with_prefix = f"@{temp_stage}/{stage_prefix}/" @@ -221,7 +219,6 @@ def test_put_with_one_file_twice(session, temp_stage, path1, local_testing_mode) @pytest.mark.skipif( IS_IN_STORED_PROC, reason="cannot write file to root directory in sandbox" ) -@pytest.mark.localtest def test_put_with_one_relative_path_file( session, temp_stage, path1, local_testing_mode ): @@ -255,7 +252,6 @@ def test_put_with_one_relative_path_file( os.remove(file_name) -@pytest.mark.localtest def 
test_put_with_multiple_files( session, temp_stage, temp_source_directory, path1, path2, path3, local_testing_mode ): @@ -280,7 +276,6 @@ def test_put_with_multiple_files( assert all(row.status in ("UPLOADED", "SKIPPED") for row in second_result) -@pytest.mark.localtest def test_put_negative( session, temp_stage, temp_source_directory, path1, local_testing_mode ): @@ -302,7 +297,6 @@ def test_put_negative( assert "does not exist or not authorized." in str(stage_not_exist_info) -@pytest.mark.localtest def test_put_stream_with_one_file( session, temp_stage, path1, path2, path3, path4, local_testing_mode ): @@ -385,7 +379,6 @@ def test_put_stream_with_one_file( assert fourth_result.message == "" -@pytest.mark.localtest def test_put_stream_with_one_file_twice(session, temp_stage, path1, local_testing_mode): stage_prefix = f"prefix_{random_alphanumeric_name()}" stage_with_prefix = f"@{temp_stage}/{stage_prefix}" @@ -424,7 +417,6 @@ def test_put_stream_with_one_file_twice(session, temp_stage, path1, local_testin assert second_result.message == "" -@pytest.mark.locatest def test_put_stream_negative(session, temp_stage, path1, local_testing_mode): stage_prefix = f"prefix_{random_alphanumeric_name()}" stage_with_prefix = f"@{temp_stage}/{stage_prefix}" @@ -463,7 +455,6 @@ def test_put_stream_negative(session, temp_stage, path1, local_testing_mode): assert ex_info.value.error_code == "1408" if not local_testing_mode else True -@pytest.mark.localtest @pytest.mark.parametrize("with_file_prefix", [True, False]) def test_get_one_file( session, @@ -510,7 +501,6 @@ def test_get_one_file( os.remove(f"{temp_target_directory}/{file_name}") -@pytest.mark.localtest def test_get_multiple_files( session, temp_stage, temp_target_directory, path1, path2, path3, local_testing_mode ): @@ -544,7 +534,6 @@ def test_get_multiple_files( os.remove(f"{temp_target_directory}/{os.path.basename(path3)}") -@pytest.mark.localtest @pytest.mark.skipif( IS_IN_STORED_PROC, reason="SNOW-570941: get with 
pattern is not supported" ) @@ -589,7 +578,6 @@ def test_get_with_pattern_and_relative_target_directory( os.remove(f"{temp_target_directory}/{os.path.basename(path3)}") -@pytest.mark.localtest @pytest.mark.skip("Error 'max_workers must be greater than 0' on Azure and GCP") def test_get_negative_test( session, temp_stage, temp_target_directory, path1, local_testing_mode @@ -619,7 +607,6 @@ def test_get_negative_test( shutil.rmtree("not_exist_target_test") -@pytest.mark.localtest @pytest.mark.skip( "Python connector doesn't have COLLISION in the result" "This error sometimes happen probably because python-connector doesn't handle file conflict well." @@ -648,7 +635,6 @@ def test_get_negative_test_file_name_collision( shutil.rmtree(target_directory) -@pytest.mark.localtest @pytest.mark.parametrize("auto_compress", [True, False]) @pytest.mark.parametrize("with_file_prefix", [True, False]) def test_get_stream( @@ -674,7 +660,6 @@ def test_get_stream( fd.close() -@pytest.mark.localtest def test_get_stream_negative(session, temp_stage): stage_prefix = f"prefix_{random_alphanumeric_name()}" stage_with_prefix = f"@{temp_stage}/{stage_prefix}/" @@ -694,7 +679,6 @@ def test_get_stream_negative(session, temp_stage): assert "the file does not exist" in str(ex_info) -@pytest.mark.localtest def test_quoted_local_file_name( session, temp_stage, tmp_path_factory, local_testing_mode ): @@ -730,7 +714,6 @@ def test_quoted_local_file_name( shutil.rmtree(special_directory) -@pytest.mark.localtest def test_path_with_special_chars(session, tmp_path_factory, local_testing_mode): stage_prefix = f"prefix_{random_alphanumeric_name()}" temp_stage = "s peci'al chars" diff --git a/tests/integ/scala/test_function_suite.py b/tests/integ/scala/test_function_suite.py index 41a979748be..bcac7df67ac 100644 --- a/tests/integ/scala/test_function_suite.py +++ b/tests/integ/scala/test_function_suite.py @@ -234,7 +234,6 @@ def parameter_override(session, parameter, value, enabled=True): 
session.sql(f"alter session unset {parameter}").collect() -@pytest.mark.localtest def test_col(session): test_data1 = TestData.test_data1(session) Utils.check_answer(test_data1.select(col("bool")), [Row(True), Row(False)]) @@ -245,7 +244,6 @@ def test_col(session): Utils.check_answer(test_data1.select(col("num")), [Row(1), Row(2)]) -@pytest.mark.localtest def test_lit(session): res = TestData.test_data1(session).select(lit(1)).collect() assert res == [Row(1), Row(1)] @@ -471,7 +469,6 @@ def test_variance(session): ) -@pytest.mark.localtest def test_coalesce(session): Utils.check_answer( TestData.null_data2(session).select(coalesce(col("A"), col("B"), col("C"))), @@ -524,7 +521,6 @@ def test_random(session): df.select(random()).collect() -@pytest.mark.localtest def test_sqrt(session): Utils.check_answer( TestData.test_data1(session).select(sqrt(col("NUM"))), @@ -539,7 +535,6 @@ def test_sqrt(session): ) -@pytest.mark.localtest def test_abs(session): Utils.check_answer( TestData.number2(session).select(abs(col("X"))), [Row(1), Row(0), Row(5)], False @@ -607,7 +602,6 @@ def test_log(session): ) -@pytest.mark.localtest def test_pow(session): Utils.check_answer( TestData.double2(session).select(pow(col("A"), col("B"))), @@ -636,7 +630,6 @@ def test_builtin_function(session): ) -@pytest.mark.localtest def test_sub_string(session): Utils.check_answer( TestData.string1(session).select(substring(col("A"), lit(2), lit(4))), @@ -695,7 +688,6 @@ def test_datediff_negative(session): TestData.timestamp1(session).select(dateadd(7, lit(1), col("a"))) -@pytest.mark.localtest @pytest.mark.parametrize( "part,expected", [ @@ -752,7 +744,6 @@ def test_dateadd(part, expected, session): ) -@pytest.mark.localtest @pytest.mark.parametrize( "part,expected", [ @@ -947,7 +938,6 @@ def test_dateadd_timestamp(part, expected, session, local_testing_mode): LocalTimezone.set_local_timezone() -@pytest.mark.localtest @pytest.mark.parametrize( "part", [ @@ -989,7 +979,6 @@ def 
test_dateadd_tz(tz_type, tzinfo, part, session): ) -@pytest.mark.localtest @pytest.mark.parametrize( "part,expected", [ @@ -1053,7 +1042,6 @@ def test_date_part_timestamp(part, expected, session): LocalTimezone.set_local_timezone() -@pytest.mark.localtest @pytest.mark.parametrize( "part,expected", [ @@ -1084,7 +1072,6 @@ def test_date_part_date(part, expected, session): LocalTimezone.set_local_timezone() -@pytest.mark.localtest @pytest.mark.parametrize( "part,expected", [ @@ -1253,7 +1240,6 @@ def test_date_trunc(part, expected, session, local_testing_mode): LocalTimezone.set_local_timezone() -@pytest.mark.localtest def test_date_trunc_negative(session, local_testing_mode): df = TestData.datetime_primitives1(session) @@ -1266,7 +1252,6 @@ def test_date_trunc_negative(session, local_testing_mode): df.select(date_trunc("dow", "date")).collect() -@pytest.mark.localtest def test_current_session(session): df = TestData.integer1(session) rows = df.select(current_session()).collect() @@ -1278,7 +1263,6 @@ def test_current_session(session): ), "All session values should be the same after call to current_session" -@pytest.mark.localtest def test_current_database(session): df = TestData.integer1(session) rows = df.select(current_database()).collect() @@ -1290,13 +1274,11 @@ def test_current_database(session): ), "All database values should be the same after call to current_database" -@pytest.mark.localtest def test_dateadd_negative(session): with pytest.raises(ValueError, match="part must be a string"): TestData.date1(session).select(dateadd(7, lit(1), "a")) -@pytest.mark.localtest def test_to_timestamp(session): long1 = TestData.long1(session) Utils.check_answer( @@ -1348,7 +1330,6 @@ def test_to_timestamp(session): ) -@pytest.mark.localtest def test_to_time(session, local_testing_mode): # basic string expr df = TestData.time_primitives1(session) @@ -1427,7 +1408,6 @@ def test_to_time(session, local_testing_mode): ) -@pytest.mark.localtest @pytest.mark.parametrize( 
"to_type,expected", [ @@ -1518,7 +1498,6 @@ def test_to_timestamp_all(to_type, expected, session, local_testing_mode): LocalTimezone.set_local_timezone() -@pytest.mark.localtest @pytest.mark.parametrize( "to_type,expected", [ @@ -1583,7 +1562,6 @@ def test_to_timestamp_fmt_string(to_type, expected, session, local_testing_mode) LocalTimezone.set_local_timezone() -@pytest.mark.localtest @pytest.mark.parametrize( "to_type,expected", [ @@ -1652,7 +1630,6 @@ def test_to_timestamp_fmt_column(to_type, expected, session, local_testing_mode) LocalTimezone.set_local_timezone() -@pytest.mark.localtest @pytest.mark.parametrize( "to_type,expected", [ @@ -1759,7 +1736,6 @@ def test_to_timestamp_numeric_scale_column( LocalTimezone.set_local_timezone() -@pytest.mark.localtest @pytest.mark.parametrize( "to_type,expected", [ @@ -1884,7 +1860,6 @@ def test_to_timestamp_variant_column(to_type, expected, session, local_testing_m LocalTimezone.set_local_timezone() -@pytest.mark.localtest def test_to_date(session): expected1 = expected2 = [ Row(date(2023, 3, 16)), @@ -2377,7 +2352,6 @@ def test_split(session): ) -@pytest.mark.localtest def test_contains(session): Utils.check_answer( TestData.string4(session).select(contains(col("a"), lit("app"))), @@ -2392,7 +2366,6 @@ def test_contains(session): ) -@pytest.mark.localtest @pytest.mark.parametrize("col_a", ["a", col("a")]) def test_startswith(session, col_a): Utils.check_answer( @@ -2402,7 +2375,6 @@ def test_startswith(session, col_a): ) -@pytest.mark.localtest @pytest.mark.parametrize("col_a", ["a", col("a")]) def test_endswith(session, col_a): Utils.check_answer( @@ -2529,7 +2501,6 @@ def test_json_extract_path_text(session): ) -@pytest.mark.localtest def test_parse_json(session): null_json1 = TestData.null_json1(session) Utils.check_answer( @@ -2576,7 +2547,6 @@ def test_parse_xml(session): ) -@pytest.mark.localtest def test_strip_null_value(session): df = TestData.null_json1(session) @@ -3148,7 +3118,6 @@ def test_objectagg(session): 
) -@pytest.mark.localtest def test_object_construct(session): Utils.check_answer( TestData.object1(session).select(object_construct(col("key"), col("value"))), @@ -3181,7 +3150,6 @@ def test_object_construct(session): ) -@pytest.mark.localtest def test_object_construct_keep_null(session): Utils.check_answer( TestData.object3(session).select( @@ -3806,7 +3774,6 @@ def test_timestamp_tz_from_parts(session, local_testing_mode): ) -@pytest.mark.localtest def test_convert_timezone(session, local_testing_mode): with parameter_override( session, @@ -3902,7 +3869,6 @@ def test_time_from_parts(session): ) -@pytest.mark.localtest def test_columns_from_timestamp_parts(): func_name = "test _columns_from_timestamp_parts" y, m, d = _columns_from_timestamp_parts(func_name, "year", "month", 8) @@ -3921,13 +3887,11 @@ def test_columns_from_timestamp_parts(): assert s._expression.value == 17 -@pytest.mark.localtest def test_columns_from_timestamp_parts_negative(): with pytest.raises(ValueError, match="Incorrect number of args passed"): _columns_from_timestamp_parts("neg test", "year", "month") -@pytest.mark.localtest def test_timestamp_from_parts_internal(): func_name = "test _timestamp_from_parts_internal" date_expr, time_expr = _timestamp_from_parts_internal(func_name, "date", "time") @@ -3984,7 +3948,6 @@ def test_timestamp_from_parts_internal(): assert s._expression.name == '"S"' -@pytest.mark.localtest def test_timestamp_from_parts_internal_negative(): func_name = "negative test" with pytest.raises(ValueError, match="expected 2 or 6 required arguments"): @@ -4351,7 +4314,6 @@ def test_get_path(session, v, k): ) -@pytest.mark.localtest def test_get(session): Utils.check_answer( TestData.object2(session).select(get(col("obj"), col("k"))), @@ -4477,7 +4439,6 @@ def test_approx_percentile_combine(session, col_a, col_b): ) -@pytest.mark.localtest def test_iff(session, local_testing_mode): df = session.create_dataframe( [(True, 2, 2, 4), (False, 12, 12, 14), (True, 22, 23, 24)], @@ 
-4540,7 +4501,6 @@ def test_dense_rank(session): ) -@pytest.mark.localtest @pytest.mark.parametrize("col_z", ["Z", col("Z")]) def test_lag(session, col_z, local_testing_mode): Utils.check_answer( @@ -4568,7 +4528,6 @@ def test_lag(session, col_z, local_testing_mode): ) -@pytest.mark.localtest @pytest.mark.parametrize("col_z", ["Z", col("Z")]) def test_lead(session, col_z, local_testing_mode): Utils.check_answer( @@ -4596,7 +4555,6 @@ def test_lead(session, col_z, local_testing_mode): ) -@pytest.mark.localtest @pytest.mark.parametrize("col_z", ["Z", col("Z")]) def test_last_value(session, col_z): Utils.check_answer( @@ -4608,7 +4566,6 @@ def test_last_value(session, col_z): ) -@pytest.mark.localtest @pytest.mark.parametrize("col_z", ["Z", col("Z")]) def test_first_value(session, col_z): Utils.check_answer( @@ -4925,7 +4882,6 @@ def test_ascii(session, col_B): ) -@pytest.mark.localtest @pytest.mark.parametrize( "func,expected", [ diff --git a/tests/integ/scala/test_large_dataframe_suite.py b/tests/integ/scala/test_large_dataframe_suite.py index d3f4df45d8b..4b7b6364021 100644 --- a/tests/integ/scala/test_large_dataframe_suite.py +++ b/tests/integ/scala/test_large_dataframe_suite.py @@ -126,7 +126,6 @@ def check_plan(df, data): session._use_scoped_temp_objects = origin_use_scoped_temp_objects_setting -@pytest.mark.localtest def test_create_dataframe_for_large_values_basic_types(session): schema = StructType( [ diff --git a/tests/integ/scala/test_session_suite.py b/tests/integ/scala/test_session_suite.py index e66b6e13e89..a6d0ddf4dd5 100644 --- a/tests/integ/scala/test_session_suite.py +++ b/tests/integ/scala/test_session_suite.py @@ -48,7 +48,6 @@ def test_invalid_configs(session, db_parameters): assert "Incorrect username or password was specified" in str(ex_info) -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="db_parameters is not available") def test_current_database_and_schema(session, db_parameters, local_testing_mode): database = 
quote_name(db_parameters["database"]) @@ -75,7 +74,6 @@ def test_current_database_and_schema(session, db_parameters, local_testing_mode) session._run_query(f"use schema {schema}") -@pytest.mark.localtest def test_quote_all_database_and_schema_names(session): def is_quoted(name: str) -> bool: return name[0] == '"' and name[-1] == '"' @@ -84,7 +82,6 @@ def is_quoted(name: str) -> bool: assert is_quoted(session.get_current_schema()) -@pytest.mark.localtest def test_create_dataframe_sequence(session): df = session.create_dataframe([[1, "one", 1.0], [2, "two", 2.0]]) assert [field.name for field in df.schema.fields] == ["_1", "_2", "_3"] @@ -100,7 +97,6 @@ def test_create_dataframe_sequence(session): assert df.collect() == [Row("one"), Row("two")] -@pytest.mark.localtest def test_create_dataframe_namedtuple(session): class P1(NamedTuple): a: int @@ -114,7 +110,6 @@ class P1(NamedTuple): # this test requires the parameters used for connection has `public role`, # and the public role has the privilege to access the current database and # schema of the current role -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Not enough privilege to run this test") def test_get_schema_database_works_after_use_role(session): current_role = session.get_current_role() @@ -161,7 +156,6 @@ def test_select_current_client(session): assert get_version() in current_client -@pytest.mark.localtest def test_negative_test_to_invalid_table_name(session): with pytest.raises(SnowparkInvalidObjectNameException) as ex_info: session.table("negative.test.invalid.table.name") @@ -170,7 +164,6 @@ def test_negative_test_to_invalid_table_name(session): ) -@pytest.mark.localtest def test_create_dataframe_from_seq_none(session, local_testing_mode): assert session.create_dataframe([None, 1]).to_df("int").collect() == [ Row(None), @@ -220,7 +213,6 @@ def test_dataframe_created_before_session_close_are_not_usable_after_closing_ses assert ex_info.value.error_code == "1404"
-@pytest.mark.localtest def test_load_table_from_array_multipart_identifier(session): name = Utils.random_name_for_temp_object(TempObjectType.TABLE) session.create_dataframe( @@ -232,7 +224,6 @@ def test_load_table_from_array_multipart_identifier(session): assert len(session.table(multipart).schema.fields) == 1 -@pytest.mark.localtest def test_session_info(session): session_info = session._session_info assert get_version() in session_info diff --git a/tests/integ/scala/test_table_suite.py b/tests/integ/scala/test_table_suite.py index 42a84affd84..66d0bff2b6f 100644 --- a/tests/integ/scala/test_table_suite.py +++ b/tests/integ/scala/test_table_suite.py @@ -99,7 +99,6 @@ def table_with_time(session: Session, local_testing_mode: bool): session.table(table_name).drop_table() -@pytest.mark.localtest def test_read_snowflake_table(session, table_name_1): df = session.table(table_name_1) Utils.check_answer(df, [Row(1), Row(2), Row(3)]) @@ -115,7 +114,6 @@ def test_read_snowflake_table(session, table_name_1): Utils.check_answer(df3, [Row(1), Row(2), Row(3)]) -@pytest.mark.localtest def test_save_as_snowflake_table(session, table_name_1, local_testing_mode): df = session.table(table_name_1) assert df.collect() == [Row(1), Row(2), Row(3)] @@ -157,7 +155,6 @@ def test_save_as_snowflake_table(session, table_name_1, local_testing_mode): session.table(table_name_3).drop_table() -@pytest.mark.localtest def test_multipart_identifier(session, table_name_1, local_testing_mode): name1 = table_name_1 name2 = session.get_current_schema() + "."
+ name1 @@ -190,7 +187,6 @@ def test_multipart_identifier(session, table_name_1, local_testing_mode): session.table(name6).drop_table() -@pytest.mark.localtest def test_write_table_to_different_schema( session, temp_schema, table_name_1, local_testing_mode ): @@ -203,14 +199,12 @@ def test_write_table_to_different_schema( session.table(name2).drop_table() -@pytest.mark.localtest def test_read_from_different_schema(session, temp_schema, temp_table_name): table_from_different_schema = f"{temp_schema}.{temp_table_name}" df = session.table(table_from_different_schema) Utils.check_answer(df, [Row("abc")]) -@pytest.mark.localtest def test_quotes_upper_and_lower_case_name(session, table_name_1): tested_table_names = [ '"' + table_name_1 + '"', @@ -270,7 +264,6 @@ def test_table_with_time_type(session, table_with_time): ) -@pytest.mark.localtest def test_consistent_table_name_behaviors(session, local_testing_mode): table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) db = session.get_current_database() diff --git a/tests/integ/scala/test_udf_suite.py b/tests/integ/scala/test_udf_suite.py index b3a0d04cf18..807b1ca7217 100644 --- a/tests/integ/scala/test_udf_suite.py +++ b/tests/integ/scala/test_udf_suite.py @@ -89,7 +89,6 @@ def setup(session, resources_path, local_testing_mode): Utils.drop_stage(session, tmp_stage_name) -@pytest.mark.localtest def test_basic_udf_function(session): df = session.table(table1) double_udf = udf( @@ -98,7 +97,6 @@ def test_basic_udf_function(session): Utils.check_answer(df.select(double_udf("a")).collect(), [Row(2), Row(4), Row(6)]) -@pytest.mark.localtest def test_child_expression(session): df = session.table(table1) double_udf = udf( @@ -109,7 +107,6 @@ def test_child_expression(session): ) -@pytest.mark.localtest def test_empty_expression(session): df = session.table(table1) const_udf = udf(lambda: 1, return_type=IntegerType(), input_types=[]) @@ -267,7 +264,6 @@ def test_view_with_udf(session): ) -@pytest.mark.localtest def 
test_string_return_type(session): df = session.table(table1) prefix = "Hello" @@ -286,7 +282,6 @@ def test_string_return_type(session): ) -@pytest.mark.localtest def test_large_closure(session): df = session.table(table1) factor = 64 @@ -300,7 +295,6 @@ def test_large_closure(session): assert rows[1][0].startswith(long_string) -@pytest.mark.localtest def test_udf_function_with_multiple_columns(session): df = session.table(table2) sum_udf = udf( @@ -318,7 +312,6 @@ def test_udf_function_with_multiple_columns(session): ) -@pytest.mark.localtest def test_call_udf_api(session): df = session.table(table1) function_name = Utils.random_name_for_temp_object(TempObjectType.FUNCTION) @@ -340,7 +333,6 @@ def test_call_udf_api(session): ) -@pytest.mark.localtest def test_long_type(session): df = session.create_dataframe([1, 2, 3]).to_df("a") long_udf = udf(lambda x: x + x, return_type=LongType(), input_types=[LongType()]) @@ -354,7 +346,6 @@ def test_long_type(session): ) -@pytest.mark.localtest def test_short_type(session): df = session.create_dataframe([1, 2, 3]).to_df("a") short_udf = udf(lambda x: x + x, return_type=ShortType(), input_types=[ShortType()]) @@ -368,7 +359,6 @@ def test_short_type(session): ) -@pytest.mark.localtest def test_float_type(session): df = session.create_dataframe([1.1, 2.2, 3.3]).to_df("a") float_udf = udf(lambda x: x + x, return_type=FloatType(), input_types=[FloatType()]) @@ -382,7 +372,6 @@ def test_float_type(session): ) -@pytest.mark.localtest def test_double_type(session): df = session.create_dataframe([1.01, 2.01, 3.01]).to_df("a") double_udf = udf( @@ -398,7 +387,6 @@ def test_double_type(session): ) -@pytest.mark.localtest def test_boolean_type(session): df = session.create_dataframe([[1, 1], [2, 2], [3, 4]]).to_df("a", "b") boolean_udf = udf( @@ -416,7 +404,6 @@ def test_boolean_type(session): ) -@pytest.mark.localtest def test_binary_type(session): data = ["Hello", "World"] bytes_data = [bytes(s, "utf8") for s in data] @@ -665,7 +652,6 
@@ def vector_add(v): ) -@pytest.mark.localtest def test_variant_string_input(session): @udf(return_type=StringType(), input_types=[VariantType()]) def variant_string_input_udf(v): @@ -692,7 +678,6 @@ def variant_binary_input_udf(v): ) -@pytest.mark.localtest def test_variant_boolean_input(session): @udf(return_type=BooleanType(), input_types=[VariantType()]) def variant_boolean_input_udf(v): @@ -704,7 +689,6 @@ def variant_boolean_input_udf(v): ) -@pytest.mark.localtest def test_variant_number_input(session): @udf(return_type=IntegerType(), input_types=[VariantType()]) def variant_number_input_udf(v): @@ -827,7 +811,6 @@ def variant_null_input_udf(v): ) -@pytest.mark.localtest def test_variant_string_output(session): @udf(return_type=VariantType(), input_types=[VariantType()]) def variant_string_output_udf(_): @@ -841,7 +824,6 @@ def variant_string_output_udf(_): # The behavior of Variant("null") in Python UDF is different from the one in Java UDF # Given a string "null", Python UDF will just a string "null", instead of NULL value -@pytest.mark.localtest def test_variant_null_string_output(session): @udf(return_type=VariantType(), input_types=[VariantType()]) def variant_null_string_output_udf(_): @@ -855,7 +837,6 @@ def variant_null_string_output_udf(_): ) -@pytest.mark.localtest def test_variant_number_output(session): @udf(return_type=VariantType(), input_types=[VariantType()]) def variant_int_output_udf(_): @@ -890,7 +871,6 @@ def variant_float_output_udf(_): # ).collect() == [Row("1.1")] -@pytest.mark.localtest def test_variant_boolean_output(session): @udf(return_type=VariantType(), input_types=[VariantType()]) def variant_boolean_output_udf(_): @@ -986,7 +966,6 @@ def variant_date_output_udf(_): ) -@pytest.mark.localtest def test_array_variant(session): @udf(return_type=ArrayType(VariantType()), input_types=[ArrayType(VariantType())]) def variant_udf(v): @@ -1016,7 +995,6 @@ def variant_udf_none_if_true(_): ) -@pytest.mark.localtest def
test_map_variant(session): @udf( return_type=MapType(StringType(), VariantType()), @@ -1055,7 +1033,6 @@ def variant_udf_none_if_true(_): ) -@pytest.mark.localtest def test_negative_test_to_input_invalid_func_name(session): func_name = "negative test invalid name" with pytest.raises(SnowparkClientException) as ex_info: diff --git a/tests/integ/scala/test_update_delete_merge_suite.py b/tests/integ/scala/test_update_delete_merge_suite.py index 5a0ec56940c..6666264baf9 100644 --- a/tests/integ/scala/test_update_delete_merge_suite.py +++ b/tests/integ/scala/test_update_delete_merge_suite.py @@ -34,7 +34,6 @@ table_name3 = Utils.random_name_for_temp_object(TempObjectType.TABLE) -@pytest.mark.localtest def test_update_rows_in_table(session): TestData.test_data2(session).write.save_as_table( table_name, mode="overwrite", table_type="temporary" ) @@ -94,7 +93,6 @@ def test_update_rows_nondeterministic_update(session): session.sql("alter session unset ERROR_ON_NONDETERMINISTIC_UPDATE").collect() -@pytest.mark.localtest def test_delete_rows_in_table(session): TestData.test_data2(session).write.save_as_table( table_name, mode="overwrite", table_type="temporary" @@ -123,7 +121,6 @@ def test_delete_rows_in_table(session): assert "condition should also be provided if source is provided" in str(ex_info) -@pytest.mark.localtest def test_update_with_join(session): TestData.test_data2(session).write.save_as_table( table_name, mode="overwrite", table_type="temporary" @@ -151,7 +148,6 @@ def test_update_with_join(session): ) -@pytest.mark.localtest def test_update_with_join_involving_ambiguous_columns(session): TestData.test_data2(session).write.save_as_table( table_name, mode="overwrite", table_type="temporary" @@ -180,7 +176,6 @@ def test_update_with_join_involving_ambiguous_columns(session): ) -@pytest.mark.localtest def test_update_with_join_with_aggregated_source_data(session): tmp = session.createDataFrame([[0, 10]], schema=["k", "v"]) tmp.write.save_as_table(table_name,
mode="overwrite", table_type="temporary") @@ -191,7 +186,6 @@ def test_update_with_join_with_aggregated_source_data(session): Utils.check_answer(target, [Row(0, 11)]) -@pytest.mark.localtest def test_delete_with_join(session): TestData.test_data2(session).write.save_as_table( table_name, mode="overwrite", table_type="temporary" @@ -212,7 +206,6 @@ def test_delete_with_join(session): Utils.check_answer(t2, [Row(3, "C"), Row(6, "F")]) -@pytest.mark.localtest def test_delete_with_join_involving_ambiguous_columns(session): TestData.test_data2(session).write.save_as_table( table_name, mode="overwrite", table_type="temporary" @@ -234,7 +227,6 @@ def test_delete_with_join_involving_ambiguous_columns(session): Utils.check_answer(up, [Row(3, "C"), Row(6, "F")]) -@pytest.mark.localtest def test_delete_with_join_with_aggregated_source_data(session): tmp = session.createDataFrame([(0, 1), (0, 2), (0, 3)], schema=["k", "v"]) tmp.write.save_as_table(table_name, mode="overwrite", table_type="temporary") @@ -245,7 +237,6 @@ def test_delete_with_join_with_aggregated_source_data(session): Utils.check_answer(target, [Row(0, 1), Row(0, 3)]) -@pytest.mark.localtest def test_merge_with_update_clause_only(session): target_df = session.createDataFrame( [(10, "old"), (10, "too_old"), (11, "old")], schema=["id", "desc"] @@ -279,7 +270,6 @@ def test_merge_with_update_clause_only(session): Utils.check_answer(target, [Row(10, "new"), Row(10, "too_old"), Row(11, "old")]) -@pytest.mark.localtest def test_merge_with_delete_clause_only(session): target_df = session.createDataFrame( [(10, "old"), (10, "too_old"), (11, "old")], schema=["id", "desc"] @@ -302,7 +292,6 @@ def test_merge_with_delete_clause_only(session): Utils.check_answer(target, [Row(10, "too_old"), Row(11, "old")]) -@pytest.mark.localtest def test_merge_with_insert_clause_only(session): target_df = session.createDataFrame( [(10, "old"), (11, "new")], schema=["id", "desc"] @@ -343,7 +332,6 @@ def 
test_merge_with_insert_clause_only(session): Utils.check_answer(target, [Row(10, "old"), Row(11, "new"), Row(12, "new")]) -@pytest.mark.localtest def test_merge_with_matched_and_not_matched_clauses(session): target_df = session.createDataFrame( [(10, "old"), (10, "too_old"), (11, "old")], schema=["id", "desc"] @@ -372,7 +360,6 @@ def test_merge_with_matched_and_not_matched_clauses(session): ) -@pytest.mark.localtest def test_merge_with_aggregated_source(session): target_df = session.createDataFrame([(0, 10)], schema=["k", "v"]) target_df.write.save_as_table(table_name, mode="overwrite", table_type="temporary") @@ -391,7 +378,6 @@ def test_merge_with_aggregated_source(session): Utils.check_answer(target, [Row(0, 12)]) -@pytest.mark.localtest def test_merge_with_multiple_clause_conditions(session): schema = StructType( [StructField("k", IntegerType()), StructField("v", IntegerType())] @@ -428,7 +414,6 @@ def test_merge_with_multiple_clause_conditions(session): ) -@pytest.mark.localtest def test_copy(session): df = session.createDataFrame([1, 2], schema=["a"]) df.write.save_as_table(table_name, mode="overwrite", table_type="temporary") @@ -440,7 +425,6 @@ def test_copy(session): Utils.check_answer(session.table(table_name), [Row(2)]) -@pytest.mark.localtest def test_match_clause_negative(session): with pytest.raises(SnowparkTableException) as ex_info: WhenMatchedClause().update({}).delete() @@ -454,7 +438,6 @@ def test_match_clause_negative(session): ) -@pytest.mark.localtest def test_update_clause_negative(session): target_df = session.createDataFrame( [(10, "old"), (10, "too_old"), (11, "old")], schema=["id", "desc"] @@ -474,7 +457,6 @@ def test_update_clause_negative(session): assert ex_info.value.error_code == "1115" -@pytest.mark.localtest def test_merge_clause_negative(session): target_df = session.createDataFrame( [(10, "old"), (10, "too_old"), (11, "old")], schema=["id", "desc"] @@ -494,7 +476,6 @@ def test_merge_clause_negative(session): ) 
-@pytest.mark.localtest def test_update_with_large_dataframe(session): from snowflake.snowpark._internal.analyzer import analyzer @@ -523,7 +504,6 @@ def test_update_with_large_dataframe(session): analyzer.ARRAY_BIND_THRESHOLD = original_value -@pytest.mark.localtest def test_delete_with_large_dataframe(session): from snowflake.snowpark._internal.analyzer import analyzer @@ -542,7 +522,6 @@ def test_delete_with_large_dataframe(session): analyzer.ARRAY_BIND_THRESHOLD = original_value -@pytest.mark.localtest def test_merge_with_large_dataframe(session): from snowflake.snowpark._internal.analyzer import analyzer @@ -578,7 +557,6 @@ def test_merge_with_large_dataframe(session): analyzer.ARRAY_BIND_THRESHOLD = original_value -@pytest.mark.localtest def test_update_with_join_involving_null_values(session): t1_name = Utils.random_table_name() t2_name = Utils.random_table_name() diff --git a/tests/integ/scala/test_view_suite.py b/tests/integ/scala/test_view_suite.py index 063ca7f4351..4d0a4e87225 100644 --- a/tests/integ/scala/test_view_suite.py +++ b/tests/integ/scala/test_view_suite.py @@ -21,7 +21,6 @@ from tests.utils import TestData, Utils -@pytest.mark.localtest def test_create_view(session, local_testing_mode): view_name = Utils.random_name_for_temp_object(TempObjectType.VIEW) try: @@ -64,7 +63,6 @@ def test_comment_on_view(session, local_testing_mode, is_temp): Utils.drop_view(session, view_name) -@pytest.mark.localtest def test_view_name_with_special_character(session, local_testing_mode): view_name = Utils.random_name_for_temp_object(TempObjectType.VIEW) try: @@ -107,7 +105,6 @@ def test_only_works_on_select(session): session.sql("show tables").create_or_replace_view(view_name) -@pytest.mark.localtest def test_consistent_view_name_behaviors(session, local_testing_mode): view_name = Utils.random_name_for_temp_object(TempObjectType.VIEW) sc = session.get_current_schema() @@ -174,7 +171,6 @@ def test_consistent_view_name_behaviors(session, local_testing_mode): 
Utils.drop_view(session, view_name) -@pytest.mark.localtest def test_create_temp_view_on_functions(session, local_testing_mode): table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) view_name = Utils.random_name_for_temp_object(TempObjectType.VIEW) diff --git a/tests/integ/scala/test_window_frame_suite.py b/tests/integ/scala/test_window_frame_suite.py index ad156f8ad21..6fa089cef9e 100644 --- a/tests/integ/scala/test_window_frame_suite.py +++ b/tests/integ/scala/test_window_frame_suite.py @@ -25,7 +25,6 @@ from tests.utils import Utils -@pytest.mark.localtest def test_lead_lag_with_positive_offset(session): df = session.create_dataframe( [(1, "1"), (2, "2"), (1, "3"), (2, "4")], schema=["key", "value"] @@ -37,7 +36,6 @@ def test_lead_lag_with_positive_offset(session): ) -@pytest.mark.localtest def test_reverse_lead_lag_with_positive_offset(session): df = session.create_dataframe( [(1, "1"), (2, "2"), (1, "3"), (2, "4")], schema=["key", "value"] @@ -49,7 +47,6 @@ def test_reverse_lead_lag_with_positive_offset(session): ) -@pytest.mark.localtest def test_lead_lag_with_negative_offset(session): df = session.create_dataframe( [(1, "1"), (2, "2"), (1, "3"), (2, "4")], schema=["key", "value"] @@ -61,7 +58,6 @@ def test_lead_lag_with_negative_offset(session): ) -@pytest.mark.localtest def test_reverse_lead_lag_with_negative_offset(session): df = session.create_dataframe( [(1, "1"), (2, "2"), (1, "3"), (2, "4")], schema=["key", "value"] @@ -73,7 +69,6 @@ def test_reverse_lead_lag_with_negative_offset(session): ) -@pytest.mark.localtest @pytest.mark.parametrize("default", [None, "10"]) def test_lead_lag_with_default_value(session, default): df = session.create_dataframe( @@ -98,7 +93,6 @@ def test_lead_lag_with_default_value(session, default): ) -@pytest.mark.localtest def test_lead_lag_with_ignore_or_respect_nulls(session): df = session.create_dataframe( [(1, 5), (2, 4), (3, None), (4, 2), (5, None), (6, None), (7, 6)], @@ -125,7 +119,6 @@ def 
test_lead_lag_with_ignore_or_respect_nulls(session): ) -@pytest.mark.localtest def test_first_last_value_with_ignore_or_respect_nulls(session): df = session.create_dataframe( [(1, None), (2, 4), (3, None), (4, 2), (5, None), (6, 6), (7, None)], @@ -152,7 +145,6 @@ def test_first_last_value_with_ignore_or_respect_nulls(session): ) -@pytest.mark.localtest def test_unbounded_rows_range_between_with_aggregation(session): df = session.create_dataframe( [("one", 1), ("two", 2), ("one", 3), ("two", 4)] @@ -176,7 +168,6 @@ def test_unbounded_rows_range_between_with_aggregation(session): ) -@pytest.mark.localtest def test_rows_between_boundary(session): # This test is different from scala as `int` in Python is unbounded df = session.create_dataframe( @@ -244,7 +235,6 @@ def test_rows_between_boundary(session): ) -@pytest.mark.localtest def test_range_between_should_accept_at_most_one_order_by_expression_when_bounded( session, local_testing_mode ): @@ -286,7 +276,6 @@ def test_range_between_should_accept_at_most_one_order_by_expression_when_bounde assert "Sliding window frame unsupported for function MIN" in str(ex_info) -@pytest.mark.localtest def test_range_between_should_accept_non_numeric_values_only_when_unbounded( session, local_testing_mode ): @@ -381,7 +370,6 @@ def test_reverse_sliding_rows_between_with_aggregation(session): ) -@pytest.mark.localtest def test_range_between_should_include_rows_equal_to_current_row(session): df1 = session.create_dataframe( [("b", 10), ("a", 10), ("a", 10), ("d", 15), ("e", 20), ("f", 20)], diff --git a/tests/integ/scala/test_window_spec_suite.py b/tests/integ/scala/test_window_spec_suite.py index d96a12f3bc9..1ce59ffb8b0 100644 --- a/tests/integ/scala/test_window_spec_suite.py +++ b/tests/integ/scala/test_window_spec_suite.py @@ -76,7 +76,6 @@ def test_partition_by_order_by_rows_between(session, local_testing_mode): ) -@pytest.mark.localtest def test_range_between(session): df = session.create_dataframe(["non_numeric"]).to_df("value") 
window = Window.order_by("value") @@ -158,7 +157,6 @@ def test_window_function_inside_where_and_having_clauses(session): assert "outside of SELECT, QUALIFY, and ORDER BY clauses" in str(ex_info) -@pytest.mark.localtest def test_reuse_window_partition_by(session): df = session.create_dataframe([(1, "1"), (2, "2"), (1, "1"), (2, "2")]).to_df( "key", "value" @@ -171,7 +169,6 @@ def test_reuse_window_partition_by(session): ) -@pytest.mark.localtest def test_reuse_window_order_by(session): df = session.create_dataframe([(1, "1"), (2, "2"), (1, "1"), (2, "2")]).to_df( "key", "value" @@ -216,7 +213,6 @@ def test_rank_functions_in_unspecific_window(session): ) -@pytest.mark.localtest def test_empty_over_spec(session): df = session.create_dataframe([("a", 1), ("a", 1), ("a", 2), ("b", 2)]).to_df( "key", "value" @@ -246,7 +242,6 @@ def test_empty_over_spec(session): ) -@pytest.mark.localtest def test_null_inputs(session): df = session.create_dataframe( [("a", 1), ("a", 1), ("a", 2), ("a", 2), ("b", 4), ("b", 3), ("b", 2)] @@ -269,7 +264,6 @@ def test_null_inputs(session): ) -@pytest.mark.localtest def test_window_function_should_fail_if_order_by_clause_is_not_specified(session): df = session.create_dataframe([(1, "1"), (2, "2"), (1, "2"), (2, "2")]).to_df( "key", "value" @@ -407,7 +401,6 @@ def test_covar_samp_var_samp_stddev_samp_functions_in_specific_window(session): ) -@pytest.mark.localtest def test_aggregation_function_on_invalid_column(session): df = session.create_dataframe([(1, "1")]).to_df("key", "value") with pytest.raises(SnowparkSQLException) as ex_info: @@ -465,7 +458,6 @@ def test_skewness_and_kurtosis_functions_in_window(session): ) -@pytest.mark.localtest def test_window_functions_in_multiple_selects(session): df = session.create_dataframe( [("S1", "P1", 100), ("S1", "P1", 700), ("S2", "P1", 200), ("S2", "P2", 300)] diff --git a/tests/integ/test_column.py b/tests/integ/test_column.py index a55eb7b80ea..cc4f9367034 100644 --- a/tests/integ/test_column.py +++ 
b/tests/integ/test_column.py @@ -14,7 +14,6 @@ from tests.utils import TestData, Utils -@pytest.mark.localtest def test_column_constructors_subscriptable(session): df = session.create_dataframe([[1, 2, 3]]).to_df("col", '"col"', "col .") assert df.select(df["col"]).collect() == [Row(1)] @@ -32,7 +31,6 @@ def test_column_constructors_subscriptable(session): assert "The DataFrame does not contain the column" in str(ex_info) -@pytest.mark.localtest def test_between(session): df = session.create_dataframe([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]).to_df( ["a", "b"] @@ -48,7 +46,6 @@ def test_between(session): ) -@pytest.mark.localtest def test_try_cast(session): df = session.create_dataframe([["2018-01-01"]], schema=["a"]) cast_res = df.select(df["a"].cast("date")).collect() @@ -56,7 +53,6 @@ def test_try_cast(session): assert cast_res[0][0] == try_cast_res[0][0] == datetime.date(2018, 1, 1) -@pytest.mark.localtest def test_try_cast_work_cast_not_work(session, local_testing_mode): df = session.create_dataframe([["aaa"]], schema=["a"]) with pytest.raises(SnowparkSQLException) as execinfo: @@ -69,7 +65,6 @@ def test_try_cast_work_cast_not_work(session, local_testing_mode): ) # try_cast doesn't throw exception -@pytest.mark.localtest def test_cast_try_cast_negative(session): df = session.create_dataframe([["aaa"]], schema=["a"]) with pytest.raises(ValueError) as execinfo: @@ -88,21 +83,18 @@ def test_cast_decimal(session, number_word): ) -@pytest.mark.localtest def test_cast_map_type(session): df = session.create_dataframe([['{"key": "1"}']], schema=["a"]) result = df.select(parse_json(df["a"]).cast("object")).collect() assert json.loads(result[0][0]) == {"key": "1"} -@pytest.mark.localtest def test_cast_array_type(session): df = session.create_dataframe([["[1,2,3]"]], schema=["a"]) result = df.select(parse_json(df["a"]).cast("array")).collect() assert json.loads(result[0][0]) == [1, 2, 3] -@pytest.mark.localtest def test_startswith(session): Utils.check_answer( 
TestData.string4(session).select(col("a").startswith(lit("a"))), @@ -111,7 +103,6 @@ def test_startswith(session): ) -@pytest.mark.localtest def test_endswith(session): Utils.check_answer( TestData.string4(session).select(col("a").endswith(lit("ana"))), @@ -120,7 +111,6 @@ def test_endswith(session): ) -@pytest.mark.localtest def test_substring(session): Utils.check_answer( TestData.string4(session).select( @@ -139,7 +129,6 @@ def test_substring(session): ) -@pytest.mark.localtest def test_contains(session): Utils.check_answer( TestData.string4(session).filter(col("a").contains(lit("e"))), @@ -148,7 +137,6 @@ def test_contains(session): ) -@pytest.mark.localtest def test_when_accept_literal_value(session): assert TestData.null_data1(session).select( when(col("a").is_null(), 5).when(col("a") == 1, 6).otherwise(7).as_("a") @@ -163,7 +151,6 @@ def test_when_accept_literal_value(session): ).collect() == [Row(5), Row(None), Row(6), Row(None), Row(5)] -@pytest.mark.localtest def test_logical_operator_raise_error(session): df = session.create_dataframe([[1, 2]], schema=["a", "b"]) with pytest.raises(TypeError) as execinfo: diff --git a/tests/integ/test_column_names.py b/tests/integ/test_column_names.py index c70ee14c3e8..c3c69542bf6 100644 --- a/tests/integ/test_column_names.py +++ b/tests/integ/test_column_names.py @@ -39,7 +39,6 @@ def get_metadata_names(session, df): return [quote_name(metadata.name) for metadata in description] -@pytest.mark.localtest def test_like(session): df1 = session.create_dataframe(["v"], schema=["c"]) df2 = df1.select(df1["c"].like(lit("v%"))) @@ -62,7 +61,6 @@ def test_like(session): ) -@pytest.mark.localtest def test_regexp(session): df1 = session.create_dataframe(["v"], schema=["c"]) df2 = df1.select(df1["c"].regexp(lit("v%"))) @@ -107,7 +105,6 @@ def test_collate(session): ) -@pytest.mark.localtest def test_subfield(session): df1 = session.create_dataframe( data=[[[1, 2, 3], {"a": "b"}]], schema=["c", '"c c"'] @@ -183,7 +180,6 @@ def 
test_specified_window_frame(session): ) -@pytest.mark.localtest def test_cast(session): df1 = session.create_dataframe([[1, "v"]], schema=["a", '" a"']) @@ -343,7 +339,6 @@ def test_interval(session): ) -@pytest.mark.localtest def test_attribute(session): df1 = session.create_dataframe([[1, 2]], schema=[" a", "a"]) df2 = df1.select(df1[" a"], df1["a"]) @@ -359,7 +354,6 @@ def test_attribute(session): ] # In class ColumnIdentifier, the "" is removed for '"A"'. -@pytest.mark.localtest def test_unresolved_attribute(session): df1 = session.create_dataframe([[1, 2]], schema=[" a", "a"]) @@ -376,7 +370,6 @@ def test_unresolved_attribute(session): ] # In class ColumnIdentifier, the "" is removed for '"A"'. -@pytest.mark.localtest def test_star(session): df1 = session.create_dataframe([[1, 2]], schema=[" a", "a"]) df2 = df1.select(df1["*"]) @@ -401,7 +394,6 @@ def test_star(session): ] # In class ColumnIdentifier, the "" is removed for '"A"'. -@pytest.mark.localtest def test_function_expression(session, local_testing_mode): df1 = session.create_dataframe(["a"], schema=["a"]) if not local_testing_mode: @@ -680,7 +672,6 @@ def test_binary_expression(session): ) -@pytest.mark.localtest def test_cast_nan_column_name(session): df1 = session.create_dataframe([["a"]], schema=["a"]) df2 = df1.select(df1["A"] == math.nan) @@ -713,7 +704,6 @@ def test_table_function(): ... 
-@pytest.mark.localtest def test_str_column_name_no_quotes(session, local_testing_mode): decimal_string = "1.500000" df = session.create_dataframe([1, 2], schema=["a"]) @@ -732,7 +722,6 @@ def test_str_column_name_no_quotes(session, local_testing_mode): ) -@pytest.mark.localtest def test_show_column_name_with_quotes(session, local_testing_mode): df = session.create_dataframe([1, 2], schema=["a"]) assert ( diff --git a/tests/integ/test_dataframe.py b/tests/integ/test_dataframe.py index 4cfb155c9d2..f5fb4d5d8d8 100644 --- a/tests/integ/test_dataframe.py +++ b/tests/integ/test_dataframe.py @@ -127,7 +127,6 @@ def table_name_1(session): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_dataframe_get_item(session): df = session.create_dataframe([[1, "a"], [2, "b"], [3, "c"], [4, "d"]]).to_df( "id", "value" @@ -142,7 +141,6 @@ def test_dataframe_get_item(session): assert "Unexpected item type: " in str(exc_info) -@pytest.mark.localtest def test_dataframe_get_attr(session): df = session.create_dataframe([[1, "a"], [2, "b"], [3, "c"], [4, "d"]]).to_df( "id", "value" @@ -228,7 +226,6 @@ def test_show_using_with_select_statement(session): ) -@pytest.mark.localtest def test_distinct(session): """Tests df.distinct().""" @@ -265,7 +262,6 @@ def test_distinct(session): assert res == [Row(None), Row(1), Row(2), Row(3), Row(4), Row(5)] -@pytest.mark.localtest def test_first(session): """Tests df.first().""" @@ -311,7 +307,6 @@ def test_first(session): assert "Invalid type of argument passed to first()" in str(ex_info) -@pytest.mark.localtest def test_new_df_from_range(session): """Tests df.range().""" @@ -355,7 +350,6 @@ def test_new_df_from_range(session): assert res == expected -@pytest.mark.localtest def test_select_single_column(session): """Tests df.select() on dataframes with a single column.""" @@ -382,7 +376,6 @@ def test_select_single_column(session): assert res == expected -@pytest.mark.localtest def test_select_star(session): """Tests 
df.select('*').""" @@ -960,7 +953,6 @@ def process(self, a: int, b: int) -> Iterable[Tuple[int, int]]: ) -@pytest.mark.localtest def test_df_subscriptable(session): """Tests select & filter as df[...]""" @@ -1010,7 +1002,6 @@ def test_df_subscriptable(session): assert res == expected -@pytest.mark.localtest def test_filter(session): """Tests for df.filter().""" df = session.range(1, 10, 2) @@ -1060,7 +1051,6 @@ def test_filter_with_sql_str(session): ) -@pytest.mark.localtest def test_filter_incorrect_type(session): """Tests for incorrect type passed to DataFrame.filter().""" df = session.range(1, 10, 2) @@ -1073,7 +1063,6 @@ def test_filter_incorrect_type(session): ) -@pytest.mark.localtest def test_filter_chained(session): """Tests for chained DataFrame.filter() operations""" @@ -1105,7 +1094,6 @@ def test_filter_chained(session): assert res == expected -@pytest.mark.localtest def test_filter_chained_col_objects_int(session): """Tests for chained DataFrame.filter() operations.""" @@ -1135,7 +1123,6 @@ def test_filter_chained_col_objects_int(session): assert res == expected -@pytest.mark.localtest def test_drop(session): """Test for dropping columns from a dataframe.""" @@ -1169,7 +1156,6 @@ def test_drop(session): assert res == expected -@pytest.mark.localtest def test_alias(session): """Test for dropping columns from a dataframe.""" @@ -1192,7 +1178,6 @@ def test_alias(session): assert res == expected -@pytest.mark.localtest def test_join_inner(session): """Test for inner join of dataframes.""" @@ -1224,7 +1209,6 @@ def test_join_inner(session): assert res == expected -@pytest.mark.localtest def test_join_left_anti(session): """Test for left-anti join of dataframes.""" @@ -1255,7 +1239,6 @@ def test_join_left_anti(session): assert res == [Row(ID=None), Row(ID=None)] -@pytest.mark.localtest def test_join_left_outer(session): """Test for left-outer join of dataframes.""" @@ -1292,7 +1275,6 @@ def test_join_left_outer(session): assert sorted(res, key=lambda r: r[0]) 
== expected -@pytest.mark.localtest def test_join_right_outer(session): """Test for right-outer join of dataframes.""" @@ -1329,7 +1311,6 @@ def test_join_right_outer(session): assert sorted(res, key=lambda r: r[0]) == expected -@pytest.mark.localtest def test_join_left_semi(session): """Test for left semi join of dataframes.""" @@ -1354,7 +1335,6 @@ def test_join_left_semi(session): assert sorted(res, key=lambda r: r[0]) == expected -@pytest.mark.localtest def test_join_cross(session): """Test for cross join of dataframes.""" @@ -1394,7 +1374,6 @@ def test_join_cross(session): assert sorted(res, key=lambda r: (r[0], r[1])) == expected -@pytest.mark.localtest def test_join_outer(session): """Test for outer join of dataframes.""" @@ -1443,7 +1422,6 @@ def test_join_outer(session): assert sorted(res, key=lambda r: r[0]) == expected -@pytest.mark.localtest def test_toDF(session): """Test df.to_df().""" @@ -1471,7 +1449,6 @@ def test_toDF(session): assert sorted(res, key=lambda r: r[0]) == expected -@pytest.mark.localtest def test_df_col(session): """Test df.col()""" @@ -1530,7 +1507,6 @@ def test_create_dataframe_with_basic_data_types(session): assert df.select(expected_names).collect() == expected_rows -@pytest.mark.localtest def test_create_dataframe_with_semi_structured_data_types(session): data = [ [ @@ -1647,7 +1623,6 @@ def test_create_dataframe_with_dict(session): ) -@pytest.mark.localtest def test_create_dataframe_with_dict_given_schema(session): schema = StructType( [ @@ -1710,7 +1685,6 @@ def test_create_dataframe_with_dict_given_schema(session): Utils.check_answer(df, [Row(None, None), Row(None, None)]) -@pytest.mark.localtest def test_create_dataframe_with_namedtuple(session): Data = namedtuple("Data", [f"snow_{idx + 1}" for idx in range(5)]) data = Data(*[idx**3 for idx in range(5)]) @@ -1732,7 +1706,6 @@ def test_create_dataframe_with_namedtuple(session): Utils.check_answer(df, [Row(1, 2, None, None), Row(None, None, 3, 4)]) -@pytest.mark.localtest def 
test_create_dataframe_with_row(session): row1 = Row(a=1, b=2) row2 = Row(a=3, b=4) @@ -1768,7 +1741,6 @@ def test_create_dataframe_with_row(session): assert "4 fields are required by schema but 2 values are provided" in str(ex_info) -@pytest.mark.localtest def test_create_dataframe_with_mixed_dict_namedtuple_row(session): d = {"a": 1, "b": 2} Data = namedtuple("Data", ["a", "b"]) @@ -1786,7 +1758,6 @@ def test_create_dataframe_with_mixed_dict_namedtuple_row(session): ) -@pytest.mark.localtest def test_create_dataframe_with_schema_col_names(session): col_names = ["a", "b", "c", "d"] df = session.create_dataframe([[1, 2, 3, 4]], schema=col_names) @@ -1807,7 +1778,6 @@ def test_create_dataframe_with_schema_col_names(session): assert Utils.equals_ignore_case(field.name, expected_name) -@pytest.mark.localtest def test_create_dataframe_with_variant(session): data = [ 1, @@ -1845,7 +1815,6 @@ def test_create_dataframe_with_variant(session): ] -@pytest.mark.localtest @pytest.mark.parametrize("data", [[0, 1, 2, 3], ["", "a"], [False, True], [None]]) def test_create_dataframe_with_single_value(session, data): expected_names = ["_1"] @@ -1914,7 +1883,6 @@ def test_create_dataframe_empty(session): assert df.with_column("c", lit(2)).columns == ["A", "B", "C"] -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC_LOCALFS, reason="Large result") def test_create_dataframe_from_none_data(session): assert session.create_dataframe([None, None]).collect() == [ @@ -1994,7 +1962,6 @@ def test_create_dataframe_large_respects_paramstyle_negative(db_parameters): new_session.close() -@pytest.mark.localtest def test_create_dataframe_with_invalid_data(session): # None input with pytest.raises(ValueError) as ex_info: @@ -2047,7 +2014,6 @@ def test_create_dataframe_with_invalid_data(session): assert "data consists of rows with different lengths" in str(ex_info) -@pytest.mark.localtest def test_attribute_reference_to_sql(session): from snowflake.snowpark.functions import sum as sum_ @@ 
-2206,7 +2172,6 @@ def test_case_insensitive_local_iterator(session): assert row["P@$$W0RD"] == "test" -@pytest.mark.localtest def test_dropna(session, local_testing_mode): Utils.check_answer( TestData.double3(session, local_testing_mode).dropna(), [Row(1.0, 1)] @@ -2236,7 +2201,6 @@ def test_dropna(session, local_testing_mode): assert "subset should be a list or tuple of column names" in str(ex_info) -@pytest.mark.localtest def test_dropna_large_num_of_columns(session): n = 1000 data = [str(i) for i in range(n)] @@ -2245,7 +2209,6 @@ def test_dropna_large_num_of_columns(session): Utils.check_answer(df.dropna(how="all"), [Row(*data)]) -@pytest.mark.localtest def test_fillna(session, local_testing_mode): Utils.check_answer( TestData.double3(session, local_testing_mode).fillna(11), @@ -2410,7 +2373,6 @@ def test_replace_with_coercion(session): assert "to_replace and value lists should be of the same length" in str(ex_info) -@pytest.mark.localtest def test_select_case_expr(session): df = session.create_dataframe([1, 2, 3], schema=["a"]) Utils.check_answer( @@ -2594,7 +2556,6 @@ def test_truncate_existing_table(session): assert session.table(table_name).count() == 3 -@pytest.mark.localtest @pytest.mark.parametrize("table_type", ["", "temp", "temporary", "transient"]) @pytest.mark.parametrize( "save_mode", ["append", "overwrite", "ignore", "errorifexists", "truncate"] @@ -2858,7 +2819,6 @@ def test_write_table_with_clustering_keys_and_comment( Utils.drop_table(session, table_name3) -@pytest.mark.localtest @pytest.mark.parametrize("table_type", ["temp", "temporary", "transient"]) @pytest.mark.parametrize( "save_mode", ["append", "overwrite", "ignore", "errorifexists", "truncate"] @@ -2886,7 +2846,6 @@ def test_write_temp_table_no_breaking_change( Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_write_invalid_table_type(session): table_name = Utils.random_name_for_temp_object(TempObjectType.TABLE) df = session.create_dataframe([(1, 2), (3, 
4)]).toDF("a", "b") @@ -3024,7 +2983,6 @@ def test_queries(session): assert post_actions[0].startswith("DROP") -@pytest.mark.localtest def test_df_columns(session): assert session.create_dataframe([1], schema=["a"]).columns == ["A"] @@ -3344,14 +3302,12 @@ def test_call_with_statement_params(session): Utils.drop_stage(session, temp_stage) -@pytest.mark.localtest def test_limit_offset(session): df = session.create_dataframe([[1, 2, 3], [4, 5, 6]], schema=["a", "b", "c"]) assert df.limit(1).collect() == [Row(A=1, B=2, C=3)] assert df.limit(1, offset=1).collect() == [Row(A=4, B=5, C=6)] -@pytest.mark.localtest def test_df_join_how_on_overwrite(session): df1 = session.create_dataframe([[1, 1, "1"], [2, 2, "3"]]).to_df( ["int", "int2", "str"] @@ -3367,7 +3323,6 @@ def test_df_join_how_on_overwrite(session): Utils.check_answer(df, [Row(1, 1, "1"), Row(2, 3, "5")]) -@pytest.mark.localtest def test_create_dataframe_special_char_column_name(session): df1 = session.create_dataframe( [[1, 2, 3], [1, 2, 3]], schema=["a b", '"abc"', "@%!^@&#"] @@ -3388,7 +3343,6 @@ def test_create_dataframe_with_tuple_schema(session): Utils.check_answer(df, [Row(20000101, 1, "x"), Row(20000101, 2, "y")]) -@pytest.mark.localtest def test_df_join_suffix(session): df1 = session.create_dataframe([[1, 1, "1"], [2, 2, "3"]]).to_df(["a", "b", "c"]) df2 = session.create_dataframe([[1, 1, "1"], [2, 3, "5"]]).to_df(["a", "b", "c"]) @@ -3451,7 +3405,6 @@ def test_df_join_suffix(session): assert df14.columns == ['"a_l"', '"a_r"'] -@pytest.mark.localtest def test_df_cross_join_suffix(session): df1 = session.create_dataframe([[1, 1, "1"]]).to_df(["a", "b", "c"]) df2 = session.create_dataframe([[1, 1, "1"]]).to_df(["a", "b", "c"]) @@ -3495,7 +3448,6 @@ def test_df_cross_join_suffix(session): assert df14.columns == ['"a_l"', '"a_r"'] -@pytest.mark.localtest def test_suffix_negative(session): df1 = session.create_dataframe([[1, 1, "1"]]).to_df(["a", "b", "c"]) df2 = session.create_dataframe([[1, 1, 
"1"]]).to_df(["a", "b", "c"]) @@ -3541,7 +3493,6 @@ def test_create_or_replace_dynamic_table_with_multiple_queries(session): ) -@pytest.mark.localtest def test_nested_joins(session): df1 = session.create_dataframe([[1, 2], [4, 5]], schema=["a", "b"]) df2 = session.create_dataframe([[1, 3], [4, 6]], schema=["c", "d"]) @@ -3809,7 +3760,6 @@ def test_dataframe_alias_negative(session): col("df", df["a"]) -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot change schema in SP") def test_dataframe_result_cache_changing_schema(session): df = session.create_dataframe([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]).to_df( diff --git a/tests/integ/test_datatypes.py b/tests/integ/test_datatypes.py index d8138a4c4dd..b69dd3f5d67 100644 --- a/tests/integ/test_datatypes.py +++ b/tests/integ/test_datatypes.py @@ -3,8 +3,6 @@ # from decimal import Decimal -import pytest - from snowflake.snowpark import DataFrame, Row from snowflake.snowpark.functions import lit from snowflake.snowpark.types import ( @@ -20,7 +18,6 @@ from tests.utils import Utils -@pytest.mark.localtest def test_basic_filter(session): df: DataFrame = session.create_dataframe( [ @@ -44,7 +41,6 @@ def test_basic_filter(session): ) -@pytest.mark.localtest def test_plus_basic(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -74,7 +70,6 @@ def test_plus_basic(session): ) -@pytest.mark.localtest def test_minus_basic(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -104,7 +99,6 @@ def test_minus_basic(session): ) -@pytest.mark.localtest def test_multiple_basic(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -134,7 +128,6 @@ def test_multiple_basic(session): ) -@pytest.mark.localtest def test_divide_basic(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -167,7 +160,6 @@ def test_divide_basic(session): ) -@pytest.mark.localtest def test_div_decimal_double(session): df = session.create_dataframe( [[11.0, 13.0]], @@ -183,7 +175,6 @@ def 
test_div_decimal_double(session): Utils.check_answer(df2, [Row(Decimal("0.846154"))]) -@pytest.mark.localtest def test_modulo_basic(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -213,7 +204,6 @@ def test_modulo_basic(session): ) -@pytest.mark.localtest def test_binary_ops_bool(session): df = session.create_dataframe( [[1, 1.1]], @@ -267,7 +257,6 @@ def test_binary_ops_bool(session): ) -@pytest.mark.localtest def test_unary_ops_bool(session): df = session.create_dataframe( [[1, 1.1]], @@ -297,7 +286,6 @@ def test_unary_ops_bool(session): ) -@pytest.mark.localtest def test_literal(session): df = session.create_dataframe( [[1]], schema=StructType([StructField("a", LongType(), nullable=False)]) @@ -308,7 +296,6 @@ def test_literal(session): ) -@pytest.mark.localtest def test_string_op_bool(session): df = session.create_dataframe([["value"]], schema=["a"]) df = df.select(df["a"].like("v%"), df["a"].regexp("v")) @@ -322,7 +309,6 @@ def test_string_op_bool(session): ) -@pytest.mark.localtest def test_filter(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -340,7 +326,6 @@ def test_filter(session): assert repr(df1.schema) == repr(df.schema) -@pytest.mark.localtest def test_sort(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -358,7 +343,6 @@ def test_sort(session): assert repr(df1.schema) == repr(df.schema) -@pytest.mark.localtest def test_limit(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -376,7 +360,6 @@ def test_limit(session): assert repr(df1.schema) == repr(df.schema) -@pytest.mark.localtest def test_chain_filter_sort_limit(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], @@ -398,7 +381,6 @@ def test_chain_filter_sort_limit(session): assert repr(df1.schema) == repr(df.schema) -@pytest.mark.localtest def test_join_basic(session): df = session.create_dataframe( [[1, 1.1, 2.2, 3.3]], diff --git a/tests/integ/test_df_aggregate.py b/tests/integ/test_df_aggregate.py index 
a4cd6d49c8e..011efcd768d 100644 --- a/tests/integ/test_df_aggregate.py +++ b/tests/integ/test_df_aggregate.py @@ -35,7 +35,6 @@ from tests.utils import Utils -@pytest.mark.localtest def test_df_agg_tuples_basic_without_std(session): df = session.create_dataframe([[1, 4], [1, 4], [2, 5], [2, 6]]).to_df( ["first", "second"] @@ -179,7 +178,6 @@ def test_df_agg_tuples_basic(session): Utils.assert_rows(res, [Row(1, 4, 2, 4.75, 0.577349980514419)]) -@pytest.mark.localtest def test_df_agg_tuples_avg_basic(session): """Test for making sure all avg word-variations work as expected""" @@ -215,7 +213,6 @@ def test_df_agg_tuples_std_basic(session): Utils.assert_rows(res, [Row(0.577349980514419)]) -@pytest.mark.localtest def test_df_agg_tuples_count_basic(session): """Test for making sure all count variations work as expected""" @@ -230,7 +227,6 @@ def test_df_agg_tuples_count_basic(session): Utils.assert_rows(res, [Row(4)]) -@pytest.mark.localtest def test_df_group_by_invalid_input(session): """Test for check invalid input for group_by function""" @@ -251,7 +247,6 @@ def test_df_group_by_invalid_input(session): ) -@pytest.mark.localtest def test_df_agg_tuples_sum_basic(session): """Test for making sure sum works as expected""" @@ -278,7 +273,6 @@ def test_df_agg_tuples_sum_basic(session): Utils.assert_rows(res, [Row(1, 8), Row(2, 11)]) -@pytest.mark.localtest def test_df_agg_dict_arg(session): """Test for making sure dict when passed to agg() works as expected""" @@ -320,7 +314,6 @@ def test_df_agg_dict_arg(session): ) -@pytest.mark.localtest def test_df_agg_invalid_args_in_list(session): """Test for making sure when a list passed to agg() produces correct errors.""" @@ -376,7 +369,6 @@ def test_df_agg_invalid_args_in_list(session): ) -@pytest.mark.localtest def test_df_agg_empty_args(session): """Test for making sure dict when passed to agg() works as expected""" @@ -387,7 +379,6 @@ def test_df_agg_empty_args(session): Utils.assert_rows(df.agg({}).collect(), [Row(1, 4)]) 
-@pytest.mark.localtest def test_df_agg_varargs_tuple_list(session): df = session.create_dataframe([[1, 4], [1, 4], [2, 5], [2, 6]]).to_df( ["first", "second"] @@ -401,7 +392,6 @@ def test_df_agg_varargs_tuple_list(session): Utils.check_answer(df.agg(["first", "count"], ("second", "sum")), [Row(4, 19)]) -@pytest.mark.localtest @pytest.mark.parametrize( "col1,col2,alias1,alias2", [ @@ -425,7 +415,6 @@ def test_df_agg_with_nonascii_column_names(session, col1, col2, alias1, alias2): assert df.agg(count(col1), sum_(col2)).columns == [alias1, alias2] -@pytest.mark.localtest def test_agg_single_column(session, local_testing_mode): val = "86.333333" origin_df = session.create_dataframe( @@ -443,7 +432,6 @@ def test_agg_single_column(session, local_testing_mode): assert origin_df.count() == 7 -@pytest.mark.localtest def test_agg_double_column(session): origin_df = session.create_dataframe( [ @@ -479,7 +467,6 @@ def test_agg_double_column(session): assert math.isnan(origin_df.select(sum_(col("m") - col("n"))).collect()[0][0]) -@pytest.mark.localtest def test_agg_function_multiple_parameters(session): origin_df = session.create_dataframe(["k1", "k1", "k3", "k4", [None]], schema=["v"]) assert origin_df.select(listagg("v", delimiter='~!1,."')).collect() == [ @@ -491,7 +478,6 @@ def test_agg_function_multiple_parameters(session): ).collect() == [Row('k1~!1,."k3~!1,."k4')] -@pytest.mark.localtest def test_register_new_methods(session, local_testing_mode): if not local_testing_mode: pytest.skip("mock implementation does not apply to live code") @@ -577,7 +563,6 @@ def mock_mock_grouping(*columns): assert origin_df.select(grouping("m", col("n"))).collect() == [Row(123)] -@pytest.mark.localtest def test_group_by(session, local_testing_mode): origin_df = session.create_dataframe( [ @@ -645,7 +630,6 @@ def mock_approx_percentile_combine(state: ColumnEmulator): ) -@pytest.mark.localtest def test_agg(session, local_testing_mode): origin_df = session.create_dataframe( [ diff --git 
a/tests/integ/test_df_sort.py b/tests/integ/test_df_sort.py index 9e0c087a147..335e215ba4b 100644 --- a/tests/integ/test_df_sort.py +++ b/tests/integ/test_df_sort.py @@ -8,7 +8,6 @@ from snowflake.snowpark import Column -@pytest.mark.localtest def test_sort_different_inputs(session): df = session.create_dataframe( [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3)] @@ -49,7 +48,6 @@ def test_sort_different_inputs(session): ) -@pytest.mark.localtest def test_sort_invalid_inputs(session): df = session.create_dataframe( [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3)] diff --git a/tests/integ/test_df_to_pandas.py b/tests/integ/test_df_to_pandas.py index 0876cea8a03..e8c1cbcd7f7 100644 --- a/tests/integ/test_df_to_pandas.py +++ b/tests/integ/test_df_to_pandas.py @@ -53,7 +53,6 @@ from tests.utils import IS_IN_STORED_PROC, Utils -@pytest.mark.localtest def test_to_pandas_new_df_from_range(session): # Single column snowpark_df = session.range(3, 8) @@ -79,7 +78,6 @@ def test_to_pandas_new_df_from_range(session): assert all(pandas_df["OTHER"][i] == i + 3 for i in range(5)) -@pytest.mark.localtest @pytest.mark.parametrize("to_pandas_api", ["to_pandas", "to_pandas_batches"]) def test_to_pandas_cast_integer(session, to_pandas_api, local_testing_mode): snowpark_df = session.create_dataframe( @@ -225,7 +223,6 @@ def check_fetch_data_exception(query: str) -> None: isinstance(df.toPandas(), PandasDF) -@pytest.mark.localtest def test_to_pandas_for_int_column_with_none_values(session): # Assert that we try to fit into int64 when possible and keep precision data = [[0], [1], [None]] @@ -242,7 +239,6 @@ def test_to_pandas_for_int_column_with_none_values(session): @pytest.mark.skipif( IS_IN_STORED_PROC, reason="SNOW-507565: Need localaws for large result" ) -@pytest.mark.localtest def test_to_pandas_batches(session, local_testing_mode): df = session.range(100000).cache_result() iterator = df.to_pandas_batches() diff --git 
a/tests/integ/test_function.py b/tests/integ/test_function.py index cf1261e9111..9a5a6301df5 100644 --- a/tests/integ/test_function.py +++ b/tests/integ/test_function.py @@ -173,7 +173,6 @@ from tests.utils import TestData, Utils -@pytest.mark.localtest def test_order(session): null_data1 = TestData.null_data1(session) assert null_data1.sort(asc(null_data1["A"])).collect() == [ @@ -220,7 +219,6 @@ def test_order(session): ] -@pytest.mark.localtest def test_current_date_and_time(session): max_delta = 1 df = ( @@ -287,7 +285,6 @@ def test_regexp_extract(session): assert res[0]["RES"] == "30" and res[1]["RES"] == "50" -@pytest.mark.localtest @pytest.mark.parametrize( "col_a, col_b, col_c", [("a", "b", "c"), (col("a"), col("b"), col("c"))] ) @@ -297,7 +294,6 @@ def test_concat(session, col_a, col_b, col_c): assert res[0][0] == "123" -@pytest.mark.localtest @pytest.mark.parametrize( "col_a, col_b, col_c", [("a", "b", "c"), (col("a"), col("b"), col("c"))] ) @@ -307,7 +303,6 @@ def test_concat_ws(session, col_a, col_b, col_c): assert res[0][0] == "1,2,3" -@pytest.mark.localtest def test_concat_edge_cases(session): df = session.create_dataframe( [[None, 1, 2, 3], [4, None, 6, 7], [8, 9, None, 11], [12, 13, 14, None]] @@ -323,7 +318,6 @@ def test_concat_edge_cases(session): assert nulls_ws == [Row(None), Row(None), Row(None), Row("12,13,14")] -@pytest.mark.localtest @pytest.mark.parametrize( "col_a", ["a", col("a")], @@ -354,7 +348,6 @@ def test_primitive_to_char(session, col_a, data, fmt, expected, convert_func): assert res[0][0] == expected -@pytest.mark.localtest @pytest.mark.parametrize("convert_func", [to_char, to_varchar]) def test_date_or_time_to_char(session, convert_func): # DateType @@ -407,7 +400,6 @@ def test_date_or_time_to_char(session, convert_func): ] -@pytest.mark.localtest @pytest.mark.parametrize("convert_func", [to_char, to_varchar]) def test_semi_structure_to_char(session, convert_func): assert session.create_dataframe([1]).select( @@ -452,7 +444,6 @@ 
def test_months_between(session, col_a, col_b): assert res[0][0] == 1.0 -@pytest.mark.localtest @pytest.mark.parametrize("col_a", ["a", col("a")]) def test_cast(session, col_a): df = session.create_dataframe([["2018-01-01"]], schema=["a"]) @@ -461,7 +452,6 @@ def test_cast(session, col_a): assert cast_res[0][0] == try_cast_res[0][0] == datetime.date(2018, 1, 1) -@pytest.mark.localtest @pytest.mark.parametrize("number_word", ["decimal", "number", "numeric"]) def test_cast_decimal(session, number_word): df = session.create_dataframe([[5.2354]], schema=["a"]) @@ -470,14 +460,12 @@ def test_cast_decimal(session, number_word): ) -@pytest.mark.localtest def test_cast_map_type(session): df = session.create_dataframe([['{"key": "1"}']], schema=["a"]) result = df.select(cast(parse_json(df["a"]), "object")).collect() assert json.loads(result[0][0]) == {"key": "1"} -@pytest.mark.localtest def test_cast_array_type(session): df = session.create_dataframe([["[1,2,3]"]], schema=["a"]) result = df.select(cast(parse_json(df["a"]), "array")).collect() @@ -525,7 +513,6 @@ def test_to_boolean(session): df.select(to_boolean("t")).collect() -@pytest.mark.localtest def test_startswith(session): Utils.check_answer( TestData.string4(session).select(col("a").startswith(lit("a"))), @@ -577,7 +564,6 @@ def test_strtok_to_array(session): assert res[0] == "a" and res[1] == "b" and res[2] == "c" -@pytest.mark.localtest @pytest.mark.parametrize("use_col", [True, False]) @pytest.mark.parametrize( "values,expected", @@ -598,7 +584,6 @@ def test_greatest(session, use_col, values, expected): assert res[0][0] == expected -@pytest.mark.localtest @pytest.mark.parametrize("use_col", [True, False]) @pytest.mark.parametrize( "values,expected", @@ -1077,7 +1062,6 @@ def test_is_negative(session): assert "Invalid argument types for function 'IS_TIMESTAMP_TZ'" in str(ex_info) -@pytest.mark.localtest def test_parse_json(session): assert TestData.null_json1(session).select(parse_json(col("v"))).collect() == [ 
Row('{\n "a": null\n}'), @@ -1275,7 +1259,6 @@ def test_as_negative(session): ) -@pytest.mark.localtest def test_to_date_to_array_to_variant_to_object(session, local_testing_mode): df = ( session.create_dataframe( @@ -1307,7 +1290,6 @@ def test_to_date_to_array_to_variant_to_object(session, local_testing_mode): assert df1.schema.fields[3].datatype == MapType(StringType(), StringType()) -@pytest.mark.localtest def test_to_binary(session): res = ( TestData.test_data1(session) @@ -1486,7 +1468,6 @@ def test_vector_distances(session): ) -@pytest.mark.localtest def test_coalesce(session): # Taken from FunctionSuite.scala Utils.check_answer( @@ -1554,7 +1535,6 @@ def test_uniform_negative(session): assert "Numeric value 'z' is not recognized" in str(ex_info) -@pytest.mark.localtest def test_negate_and_not_negative(session): with pytest.raises(TypeError) as ex_info: TestData.null_data2(session).select(negate(["A", "B", "C"])) @@ -2070,7 +2050,6 @@ def test_create_map_negative(session): ) -@pytest.mark.localtest def test_to_double(session, local_testing_mode): # Test supported input type @@ -2136,7 +2115,6 @@ def test_to_double(session, local_testing_mode): ) -@pytest.mark.localtest def test_to_decimal(session, local_testing_mode): # Supported input type df = session.create_dataframe( diff --git a/tests/integ/test_packaging.py b/tests/integ/test_packaging.py index adb7a75a39a..1779e94415b 100644 --- a/tests/integ/test_packaging.py +++ b/tests/integ/test_packaging.py @@ -184,7 +184,6 @@ def test_patch_on_get_available_versions_for_packages(session): assert "catboost" not in returned -@pytest.mark.localtest @pytest.mark.udf @pytest.mark.skipif( (not is_pandas_and_numpy_available) or IS_IN_STORED_PROC, @@ -302,7 +301,6 @@ def check_if_package_installed() -> bool: Utils.check_answer(session.sql(f"select {udf_name}()").collect(), [Row(True)]) -@pytest.mark.localtest @pytest.mark.udf def test_add_packages_with_underscore_and_versions(session): 
session.add_packages(["huggingface_hub==0.15.1"]) @@ -366,7 +364,6 @@ def test_add_packages_negative(session, caplog): session.remove_package("python-dateutil") -@pytest.mark.localtest @pytest.mark.udf @pytest.mark.skipif( (not is_pandas_and_numpy_available) or IS_IN_STORED_PROC, @@ -397,7 +394,6 @@ def get_numpy_pandas_version() -> str: ) -@pytest.mark.localtest def test_add_requirements_twice_should_fail_if_packages_are_different( session, resources_path ): @@ -707,7 +703,6 @@ def get_numpy_pandas_version() -> str: Utils.check_answer(session.sql(f"select {udf_name}()"), [Row("0.11.1/1.10.1")]) -@pytest.mark.localtest def test_add_requirements_with_bad_yaml(session, bad_yaml_file): with pytest.raises( ValueError, @@ -716,7 +711,6 @@ def test_add_requirements_with_bad_yaml(session, bad_yaml_file): session.add_requirements(bad_yaml_file) -@pytest.mark.localtest def test_add_requirements_with_ranged_requirements_in_yaml(session, ranged_yaml_file): with pytest.raises( ValueError, @@ -1071,7 +1065,6 @@ def test_get_available_versions_for_packages(session): assert len(returned[key]) > 0 -@pytest.mark.localtest @pytest.mark.skipif( IS_IN_STORED_PROC, reason="Subprocess calls are not allowed within stored procedures.", diff --git a/tests/integ/test_session.py b/tests/integ/test_session.py index 6a90ee17c6d..bb5d896cc89 100644 --- a/tests/integ/test_session.py +++ b/tests/integ/test_session.py @@ -112,27 +112,23 @@ def test_sql_select_with_params(session): assert res == [Row(1)] -@pytest.mark.localtest def test_active_session(session): assert session == _get_active_session() assert not session._conn._conn.expired -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_multiple_active_sessions(session, db_parameters): with Session.builder.configs(db_parameters).create() as session2: assert {session, session2} == _get_active_sessions() -@pytest.mark.localtest def test_get_or_create(session): # because there is already a 
session it should report the same new_session = Session.builder.getOrCreate() assert session == new_session -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_get_or_create_no_previous(db_parameters, session): # Test getOrCreate error. In this case we want to make sure that @@ -163,7 +159,6 @@ def test_get_or_create_no_previous(db_parameters, session): new_session2.close() -@pytest.mark.localtest def test_session_builder(session): builder1 = session.builder builder2 = session.builder @@ -186,7 +181,6 @@ def test_session_cancel_all(session): assert "cancelled" in session._conn._cursor.fetchall()[0][0] -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_multiple_sessions(session, db_parameters): with Session.builder.configs(db_parameters).create(): @@ -195,7 +189,6 @@ def test_multiple_sessions(session, db_parameters): assert exec_info.value.error_code == "1409" -@pytest.mark.localtest def test_no_default_session(): sessions_backup = list(_active_sessions) _active_sessions.clear() @@ -211,7 +204,6 @@ def test_no_default_session(): _active_sessions.update(sessions_backup) -@pytest.mark.localtest def test_create_session_in_sp(session): import snowflake.snowpark._internal.utils as internal_utils @@ -313,7 +305,6 @@ def test_list_files_in_stage(session, resources_path): Utils.drop_stage(session, single_quoted_name) -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_create_session_from_parameters(db_parameters, sql_simplifier_enabled): session_builder = Session.builder.configs(db_parameters) @@ -329,7 +320,6 @@ def test_create_session_from_parameters(db_parameters, sql_simplifier_enabled): new_session.close() -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_create_session_from_connection( db_parameters, sql_simplifier_enabled, 
local_testing_mode @@ -382,7 +372,6 @@ def test_create_session_from_connection_with_noise_parameters( reason="Query tag is a SQL feature", run=False, ) -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_session_builder_app_name(session, db_parameters): builder = session.builder @@ -506,7 +495,6 @@ def check_table_and_drop(table_name_str): Utils.drop_schema(session, double_quoted_schema) -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_use_database(db_parameters, sql_simplifier_enabled): parameters = db_parameters.copy() @@ -520,7 +508,6 @@ def test_use_database(db_parameters, sql_simplifier_enabled): assert session.get_current_database() == f'"{db_name.upper()}"' -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_use_schema(db_parameters, sql_simplifier_enabled, local_testing_mode): parameters = db_parameters.copy() @@ -543,7 +530,6 @@ def test_use_schema(db_parameters, sql_simplifier_enabled, local_testing_mode): session.sql(f"DROP SCHEMA IF EXISTS {quoted_schema_name}").collect() -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_use_warehouse(db_parameters, sql_simplifier_enabled): parameters = db_parameters.copy() @@ -557,7 +543,6 @@ def test_use_warehouse(db_parameters, sql_simplifier_enabled): assert session.get_current_warehouse() == f'"{warehouse_name.upper()}"' -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_use_role(db_parameters, sql_simplifier_enabled): role_name = "PUBLIC" @@ -567,7 +552,6 @@ def test_use_role(db_parameters, sql_simplifier_enabled): assert session.get_current_role() == f'"{role_name}"' -@pytest.mark.localtest @pytest.mark.parametrize("obj", [None, "'object'", "obje\\ct", "obj\nect", r"\uobject"]) def test_use_negative_tests(session, obj): if 
obj: @@ -675,7 +659,6 @@ def test_sql_simplifier_disabled_on_session(db_parameters): assert new_session2.sql_simplifier_enabled is False -@pytest.mark.localtest @pytest.mark.skipif(IS_IN_STORED_PROC, reason="Cannot create session in SP") def test_create_session_from_default_config_file(monkeypatch, db_parameters): import tomlkit diff --git a/tests/integ/test_stored_procedure.py b/tests/integ/test_stored_procedure.py index b2027a38da7..289e95ef775 100644 --- a/tests/integ/test_stored_procedure.py +++ b/tests/integ/test_stored_procedure.py @@ -178,7 +178,6 @@ def return1(session_): ) -@pytest.mark.localtest def test_basic_stored_procedure(session, local_testing_mode): def return1(session_): return session_.create_dataframe([["1"]]).collect()[0][0] @@ -240,7 +239,6 @@ def sp_pow(session_, x, y): assert pow_sp(2, 10, session=session) == 1024 -@pytest.mark.localtest def test_stored_procedure_with_basic_column_datatype(session, local_testing_mode): expected_err = Exception if local_testing_mode else SnowparkSQLException @@ -265,7 +263,6 @@ def plus1(session_, x): assert "not recognized" in str(ex_info) or "Unexpected type" in str(ex_info) -@pytest.mark.localtest def test_stored_procedure_with_column_datatype(session, local_testing_mode): def add(session_, x, y): return x + y @@ -413,7 +410,6 @@ def test_sproc(session: Session) -> DataFrame: assert df.dtypes == expected_dtypes -@pytest.mark.localtest @pytest.mark.parametrize("anonymous", [True, False]) def test_call_table_sproc_triggers_action(session, anonymous): """Here we create a table sproc which creates a table. 
we call the table sproc using @@ -443,7 +439,6 @@ def create_temp_table_sp(session_: Session, name: str): Utils.drop_table(session, table_name) -@pytest.mark.localtest def test_recursive_function(session): # Test recursive function def factorial(session_, n): @@ -455,7 +450,6 @@ def factorial(session_, n): assert factorial_sp(3) == factorial(session, 3) -@pytest.mark.localtest def test_nested_function(session): def outer_func(session_): def inner_func(): @@ -485,7 +479,6 @@ def cube(session_, x): assert square_sp(2) == 4 -@pytest.mark.localtest def test_decorator_function(session): def decorator_do_twice(func): def wrapper(*args, **kwargs): @@ -508,7 +501,6 @@ def square(session_, x): assert square_twice_sp(2) == 16 -@pytest.mark.localtest def test_annotation_syntax(session): @sproc(return_type=IntegerType(), input_types=[IntegerType(), IntegerType()]) def add_sp(session_, x, y): @@ -523,7 +515,6 @@ def snow(session_): assert snow() == "snow" -@pytest.mark.localtest def test_register_sp_from_file(session, resources_path, tmpdir): test_files = TestFiles(resources_path) @@ -567,7 +558,6 @@ def test_register_sp_from_file(session, resources_path, tmpdir): ) -@pytest.mark.localtest def test_session_register_sp(session, local_testing_mode): add_sp = session.sproc.register( lambda session_, x, y: session_.create_dataframe([(x, y)]) @@ -593,7 +583,6 @@ def test_session_register_sp(session, local_testing_mode): Utils.assert_executed_with_query_tag(session, query_tag, local_testing_mode) -@pytest.mark.localtest def test_add_import_local_file(session, resources_path): test_files = TestFiles(resources_path) @@ -640,7 +629,6 @@ def plus4_then_mod5_direct_import(session_, x): session.clear_imports() -@pytest.mark.localtest def test_add_import_local_directory(session, resources_path): test_files = TestFiles(resources_path) @@ -684,7 +672,6 @@ def plus4_then_mod5_direct_import(session_, x): session.clear_imports() -@pytest.mark.localtest def test_add_import_stage_file(session, 
resources_path): test_files = TestFiles(resources_path) @@ -710,7 +697,6 @@ def plus4_then_mod5(session_, x): session.clear_imports() -@pytest.mark.localtest def test_sp_level_import(session, resources_path, local_testing_mode): test_files = TestFiles(resources_path) @@ -750,7 +736,6 @@ def plus4_then_mod5(session_, x): assert "No module named" in ex_info.value.message -@pytest.mark.localtest def test_type_hints(session): @sproc() def add_sp(session_: Session, x: int, y: int) -> int: @@ -803,7 +788,6 @@ def get_sp(_: Session, d: Dict[str, str], i: str) -> str: assert get_sp({"0": "snow", "1": "flake"}, "0") == "snow" -@pytest.mark.localtest def test_type_hint_no_change_after_registration(session): def add(session_: Session, x: int, y: int) -> int: return ( @@ -818,7 +802,6 @@ def add(session_: Session, x: int, y: int) -> int: assert annotations == add.__annotations__ -@pytest.mark.localtest def test_register_sp_from_file_type_hints(session, tmpdir): source = """ import datetime @@ -1633,7 +1616,6 @@ def return1(_): assert return1_sp() == 1 -@pytest.mark.localtest @pytest.mark.parametrize("execute_as", [None, "owner", "caller"]) def test_execute_as_options_while_registering_from_file( session, resources_path, tmpdir, execute_as @@ -1668,7 +1650,6 @@ def test_execute_as_options_while_registering_from_file( assert mod5_sp_stage(3) == 3 -@pytest.mark.localtest def test_call_sproc_with_session_as_first_argument(session): @sproc def return1(_: Session) -> int: diff --git a/tests/integ/test_udf.py b/tests/integ/test_udf.py index be9cc6cd842..a81d590b430 100644 --- a/tests/integ/test_udf.py +++ b/tests/integ/test_udf.py @@ -410,7 +410,6 @@ def test_session_register_udf(session, local_testing_mode): Utils.assert_executed_with_query_tag(session, query_tag) -@pytest.mark.localtest def test_register_udf_from_file(session, resources_path): test_files = TestFiles(resources_path) df = session.create_dataframe([[3, 4], [5, 6]]).to_df("a", "b") @@ -458,7 +457,6 @@ def 
test_register_vectorized_udf_from_file(session, resources_path): ) -@pytest.mark.localtest def test_register_udf_from_zip_file(session, resources_path, tmpdir): test_files = TestFiles(resources_path) df = session.create_dataframe([[3, 4], [5, 6]]).to_df("a", "b") @@ -483,7 +481,6 @@ def test_register_udf_from_zip_file(session, resources_path, tmpdir): ) -@pytest.mark.localtest def test_register_udf_from_remote_file(session, resources_path): test_files = TestFiles(resources_path) df = session.create_dataframe([[3, 4], [5, 6]]).to_df("a", "b") @@ -502,7 +499,6 @@ def test_register_udf_from_remote_file(session, resources_path): ) -@pytest.mark.localtest def test_register_udf_from_remote_file_with_statement_params( session, resources_path, local_testing_mode ): @@ -575,7 +571,6 @@ def test_register_from_file_with_skip_upload(session, resources_path, caplog): Utils.drop_stage(session, stage_name) -@pytest.mark.localtest def test_add_import_local_file(session, resources_path): test_files = TestFiles(resources_path) @@ -622,7 +617,6 @@ def plus4_then_mod5_with_2_level_import(x): session.clear_imports() -@pytest.mark.localtest def test_add_import_local_directory(session, resources_path): test_files = TestFiles(resources_path) @@ -666,7 +660,6 @@ def plus4_then_mod5_with_2_level_import(x): session.clear_imports() -@pytest.mark.localtest def test_add_import_stage_file(session, resources_path): test_files = TestFiles(resources_path) @@ -693,7 +686,6 @@ def plus4_then_mod5_with_import(x): session.clear_imports() -@pytest.mark.localtest @pytest.mark.skipif(not is_dateutil_available, reason="dateutil is required") def test_add_import_package(session): def plus_one_month(x): @@ -714,7 +706,6 @@ def plus_one_month(x): session.clear_imports() -@pytest.mark.localtest @pytest.mark.skipif( IS_IN_STORED_PROC, reason="SNOW-609328: support caplog in SP regression test" ) @@ -743,7 +734,6 @@ def test_add_import_duplicate(session, resources_path, caplog, local_testing_mod assert 
len(session.get_imports()) == 0 -@pytest.mark.localtest def test_udf_level_import(session, resources_path, local_testing_mode): test_files = TestFiles(resources_path) @@ -797,7 +787,6 @@ def plus4_then_mod5_with_import(x): session.clear_imports() -@pytest.mark.localtest def test_add_import_namespace_collision(session, resources_path): test_files = TestFiles(resources_path) @@ -833,7 +822,6 @@ def plus4_then_mod5(x): session.clear_imports() -@pytest.mark.localtest def test_add_import_namespace_collision_snowflake_package(session, tmp_path): fake_snowflake_dir = tmp_path / "snowflake" / "task" fake_snowflake_dir.mkdir(parents=True) @@ -929,7 +917,6 @@ def return_geometry_dict_udf(g: Geometry) -> Dict[str, str]: ) -@pytest.mark.localtest def test_type_hint_no_change_after_registration(session): def add(x: int, y: int) -> int: return x + y @@ -939,7 +926,6 @@ def add(x: int, y: int) -> int: assert annotations == add.__annotations__ -@pytest.mark.localtest def test_register_udf_from_file_type_hints(session, tmpdir): source = """ import datetime @@ -2127,7 +2113,6 @@ def plus1(x: int) -> int: session._run_query(f"drop function if exists {perm_func_name}(int)") -@pytest.mark.localtest def test_udf_class_method(session): # Note that we never mention in the doc that we support registering UDF from a class method. # However, some users might still be interested in doing that. 
@@ -2248,7 +2233,6 @@ def return1(): break -@pytest.mark.localtest @pytest.mark.skipif( IS_IN_STORED_PROC, reason="SNOW-609328: support caplog in SP regression test" ) @@ -2300,7 +2284,6 @@ def echo(num: int) -> int: (not is_pandas_available) or IS_IN_STORED_PROC, reason="numpy and pandas are required", ) -@pytest.mark.localtest @pytest.mark.parametrize("func", numpy_funcs) def test_numpy_udf(session, func): numpy_udf = udf( @@ -2541,7 +2524,6 @@ def return_success(): pytest.skip("External Access Integration is not supported on the deployment.") -@pytest.mark.localtest def test_access_snowflake_import_directory(session, resources_path): test_files = TestFiles(resources_path) diff --git a/tests/mock/test_filter.py b/tests/mock/test_filter.py index 956f012be93..e6c17741c85 100644 --- a/tests/mock/test_filter.py +++ b/tests/mock/test_filter.py @@ -4,13 +4,10 @@ import math -import pytest - from snowflake.snowpark import DataFrame, Row from snowflake.snowpark.functions import col -@pytest.mark.localtest def test_basic_filter(session): origin_df: DataFrame = session.create_dataframe( [ @@ -96,7 +93,6 @@ def test_basic_filter(session): ] -@pytest.mark.localtest def test_null_nan_filter(session): origin_df: DataFrame = session.create_dataframe( [ @@ -153,7 +149,6 @@ def test_null_nan_filter(session): assert res[2] == Row(None, None) -@pytest.mark.localtest def test_chain_filter(session): origin_df: DataFrame = session.create_dataframe( [ @@ -172,7 +167,6 @@ def test_chain_filter(session): ] -@pytest.mark.localtest def test_like_filter(session): origin_df: DataFrame = session.create_dataframe( [["test"], ["tttest"], ["tett"], ["ess"], ["es#!s"], ["es#)s"]], schema=["a"] @@ -200,7 +194,6 @@ def test_like_filter(session): ] -@pytest.mark.localtest def test_regex_filter(session): origin_df: DataFrame = session.create_dataframe( [["test"], ["tttest"], ["tett"], ["ess"], ["es#%s"]], schema=["a"] diff --git a/tests/mock/test_functions.py b/tests/mock/test_functions.py index 
567eb7e20f2..861b7fd4f7b 100644 --- a/tests/mock/test_functions.py +++ b/tests/mock/test_functions.py @@ -4,8 +4,6 @@ import datetime import math -import pytest - from snowflake.snowpark import DataFrame, Row from snowflake.snowpark.functions import ( # count,; is_null,; abs, @@ -23,7 +21,6 @@ ) -@pytest.mark.localtest def test_col(session): origin_df: DataFrame = session.create_dataframe( [ @@ -38,7 +35,6 @@ def test_col(session): assert origin_df.select(col("o")).collect() == [Row(True), Row(False), Row(None)] -@pytest.mark.localtest def test_max(session): origin_df: DataFrame = session.create_dataframe( [ @@ -59,7 +55,6 @@ def test_max(session): assert math.isnan(origin_df.select(max("s").as_("g")).collect()[0][0]) -@pytest.mark.localtest def test_min(session): origin_df: DataFrame = session.create_dataframe( [ @@ -81,7 +76,6 @@ def test_min(session): assert math.isnan(origin_df.select(min("s").as_("g")).collect()[0][0]) -@pytest.mark.localtest def test_to_date(session): origin_df: DataFrame = session.create_dataframe( ["2013-05-17", "31536000000000"], @@ -94,7 +88,6 @@ def test_to_date(session): ] -@pytest.mark.localtest def test_contains(session): origin_df: DataFrame = session.create_dataframe( [ @@ -133,7 +126,6 @@ def test_contains(session): ] -@pytest.mark.localtest def test_abs(session): origin_df: DataFrame = session.create_dataframe( [ @@ -146,7 +138,6 @@ def test_abs(session): assert origin_df.select(abs(col("m"))).collect() == [Row(1), Row(1), Row(2)] -@pytest.mark.localtest def test_asc_and_desc(session): origin_df: DataFrame = session.create_dataframe( [ @@ -165,7 +156,6 @@ def test_asc_and_desc(session): assert origin_df.sort(desc(col("v"))).collect() == expected -@pytest.mark.localtest def test_count(session): origin_df: DataFrame = session.create_dataframe( [ @@ -181,7 +171,6 @@ def test_count(session): assert origin_df.select(count("v")).collect() == [Row(6)] -@pytest.mark.localtest def test_is_null(session): origin_df: DataFrame = 
session.create_dataframe( [ @@ -202,7 +191,6 @@ def test_is_null(session): ] -@pytest.mark.localtest def test_take_first(session): origin_df: DataFrame = session.create_dataframe( [ @@ -244,7 +232,6 @@ def test_take_first(session): assert math.isnan(res[4][0]) and res[4][1] == 200 and res[4][2] is None -@pytest.mark.localtest def test_show(session): origin_df: DataFrame = session.create_dataframe( [ diff --git a/tests/mock/test_sort.py b/tests/mock/test_sort.py index e7b5065f43f..9d96ab9b57f 100644 --- a/tests/mock/test_sort.py +++ b/tests/mock/test_sort.py @@ -2,14 +2,11 @@ # Copyright (c) 2012-2024 Snowflake Computing Inc. All rights reserved. # -import pytest - from snowflake.snowpark import DataFrame, Row from snowflake.snowpark.functions import col from tests.utils import Utils -@pytest.mark.localtest def test_sort_single_column(session): origin_df: DataFrame = session.create_dataframe( [ @@ -63,7 +60,6 @@ def test_sort_single_column(session): ) -@pytest.mark.localtest def test_sort_multiple_column(session): origin_df: DataFrame = session.create_dataframe( [ diff --git a/tests/mock/test_stage_registry.py b/tests/mock/test_stage_registry.py index 9e9b5a6885f..e6dce6a86a8 100644 --- a/tests/mock/test_stage_registry.py +++ b/tests/mock/test_stage_registry.py @@ -15,7 +15,6 @@ ) -@pytest.mark.localtest def test_util(): assert extract_stage_name_and_prefix("@stage") == ("stage", "") assert extract_stage_name_and_prefix("@stage/dir") == ("stage", "dir") @@ -26,7 +25,6 @@ def test_util(): ) -@pytest.mark.localtest def test_stage_put_file(): stage_registry = StageEntityRegistry(MockServerConnection()) stage_registry.create_or_replace_stage("test_stage") @@ -125,7 +123,6 @@ def test_stage_put_file(): ) -@pytest.mark.localtest def test_stage_put_stream(): stage_registry = StageEntityRegistry(MockServerConnection()) stage_registry.create_or_replace_stage("test_stage") @@ -195,7 +192,6 @@ def test_stage_put_stream(): } -@pytest.mark.locatest def test_stage_get_file(): 
stage_registry = StageEntityRegistry(MockServerConnection()) stage_registry.put( diff --git a/tests/mock/test_udf.py b/tests/mock/test_udf.py index b029fe20559..5cbc0ad1957 100644 --- a/tests/mock/test_udf.py +++ b/tests/mock/test_udf.py @@ -15,7 +15,6 @@ from snowflake.snowpark.types import IntegerType -@pytest.mark.localtest def test_udf_cleanup_on_err(session): cur_dir = os.path.dirname(os.path.realpath(__file__)) test_file = os.path.join(cur_dir, "files", "udf_file.py") @@ -38,7 +37,6 @@ def test_udf_cleanup_on_err(session): ) # assert sys.path is cleaned up after UDF exits on exception -@pytest.mark.localtest def test_registering_udf_with_qualified_identifier(session): custom_schema = "test_identifier_schema" @@ -68,7 +66,6 @@ def add_fn(x: int, y: int) -> int: ) -@pytest.mark.localtest def test_registering_sproc_with_qualified_identifier(session): custom_schema = "test_identifier_schema" diff --git a/tests/mock/test_union.py b/tests/mock/test_union.py index 3b80b596983..7d283cd1e6d 100644 --- a/tests/mock/test_union.py +++ b/tests/mock/test_union.py @@ -2,13 +2,11 @@ # Copyright (c) 2012-2024 Snowflake Computing Inc. All rights reserved. # -import pytest from snowflake.snowpark import DataFrame, Row from tests.utils import Utils -@pytest.mark.localtest def test_union_basic(session): df1: DataFrame = session.create_dataframe( [ @@ -79,7 +77,6 @@ def test_union_basic(session): ) -@pytest.mark.localtest def test_union_by_name(session): df1: DataFrame = session.create_dataframe( [ diff --git a/tox.ini b/tox.ini index 3fd154c9e76..e07d9dc1c5f 100644 --- a/tox.ini +++ b/tox.ini @@ -194,8 +194,6 @@ markers = doctest: doctest tests # Other markers timeout: tests that need a timeout time - localtest: local tests - scala: scala tests modin_sp_short_regress: modin short_regress tests run in sproc modin_sp_precommit: modin precommit tests run in sproc addopts = --doctest-modules --timeout=1200