diff --git a/CHANGELOG.md b/CHANGELOG.md
index 06befbb6cda..ba34f8ccafb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,9 @@
 - Added support for an optional `date_part` argument in function `last_day`
 - `SessionBuilder.app_name` will set the query_tag after the session is created.
 - Added support for the following local testing functions:
+  - current_timestamp
+  - current_date
+  - current_time
   - strip_null_value
   - upper
   - lower
diff --git a/src/snowflake/snowpark/mock/_functions.py b/src/snowflake/snowpark/mock/_functions.py
index 6b43c551828..81ff9d61c09 100644
--- a/src/snowflake/snowpark/mock/_functions.py
+++ b/src/snowflake/snowpark/mock/_functions.py
@@ -30,6 +30,7 @@
     MapType,
     NullType,
     StringType,
+    TimestampTimeZone,
     TimestampType,
     TimeType,
     VariantType,
@@ -309,6 +310,26 @@ def mock_to_date(
     )


+@patch("current_timestamp")
+def mock_current_timestamp():
+    return ColumnEmulator(
+        data=datetime.datetime.now(),
+        sf_type=ColumnType(TimestampType(TimestampTimeZone.LTZ), False),
+    )
+
+
+@patch("current_date")
+def mock_current_date():
+    now = datetime.datetime.now()
+    return ColumnEmulator(data=now.date(), sf_type=ColumnType(DateType(), False))
+
+
+@patch("current_time")
+def mock_current_time():
+    now = datetime.datetime.now()
+    return ColumnEmulator(data=now.time(), sf_type=ColumnType(TimeType(), False))
+
+
 @patch("contains")
 def mock_contains(expr1: ColumnEmulator, expr2: ColumnEmulator):
     if isinstance(expr1, str) and isinstance(expr2, str):
diff --git a/tests/integ/test_function.py b/tests/integ/test_function.py
index ece8c528a93..2fa6499fb1b 100644
--- a/tests/integ/test_function.py
+++ b/tests/integ/test_function.py
@@ -206,12 +206,22 @@ def test_order(session):
     ]


+@pytest.mark.localtest
 def test_current_date_and_time(session):
-    df1 = session.sql("select current_date(), current_time(), current_timestamp()")
-    df2 = session.create_dataframe([1]).select(
+    max_delta = 1
+    df = session.create_dataframe([1]).select(
         current_date(), current_time(), current_timestamp()
     )
-    assert len(df1.union(df2).collect()) == 1
+    rows = df.collect()
+
+    assert len(rows) == 1, "df should only contain 1 row"
+    date, time, timestamp = rows[0]
+    time1 = datetime.datetime.combine(date, time).timestamp()
+    time2 = timestamp.timestamp()
+
+    assert time1 == pytest.approx(
+        time2, abs=max_delta
+    ), f"Times should be within {max_delta} seconds of each other."


 @pytest.mark.parametrize("col_a", ["a", col("a")])
@@ -1646,65 +1656,61 @@ def _result_str2lst(result):
 def test_create_map(session):
     df = session.create_dataframe(
         [("Sales", 6500, "USA"), ("Legal", 3000, None)],
-        ("department", "salary", "location")
+        ("department", "salary", "location"),
     )

     # Case 1: create_map with column names
     Utils.check_answer(
         df.select(create_map("department", "salary").alias("map")),
-        [
-            Row(MAP='{\n "Sales": 6500\n}'),
-            Row(MAP='{\n "Legal": 3000\n}')
-        ],
+        [Row(MAP='{\n "Sales": 6500\n}'), Row(MAP='{\n "Legal": 3000\n}')],
         sort=False,
     )

     # Case 2: create_map with column objects
     Utils.check_answer(
         df.select(create_map(df.department, df.salary).alias("map")),
-        [
-            Row(MAP='{\n "Sales": 6500\n}'),
-            Row(MAP='{\n "Legal": 3000\n}')
-        ],
+        [Row(MAP='{\n "Sales": 6500\n}'), Row(MAP='{\n "Legal": 3000\n}')],
         sort=False,
     )

     # Case 3: create_map with a list of column names
     Utils.check_answer(
         df.select(create_map(["department", "salary"]).alias("map")),
-        [
-            Row(MAP='{\n "Sales": 6500\n}'),
-            Row(MAP='{\n "Legal": 3000\n}')
-        ],
+        [Row(MAP='{\n "Sales": 6500\n}'), Row(MAP='{\n "Legal": 3000\n}')],
         sort=False,
     )

     # Case 4: create_map with a list of column objects
     Utils.check_answer(
         df.select(create_map([df.department, df.salary]).alias("map")),
-        [
-            Row(MAP='{\n "Sales": 6500\n}'),
-            Row(MAP='{\n "Legal": 3000\n}')
-        ],
+        [Row(MAP='{\n "Sales": 6500\n}'), Row(MAP='{\n "Legal": 3000\n}')],
         sort=False,
     )

     # Case 5: create_map with constant values
     Utils.check_answer(
-        df.select(create_map(lit("department"), col("department"), lit("salary"), col("salary")).alias("map")),
+        df.select(
+            create_map(
+                lit("department"), col("department"), lit("salary"), col("salary")
+            ).alias("map")
+        ),
         [
             Row(MAP='{\n "department": "Sales",\n "salary": 6500\n}'),
-            Row(MAP='{\n "department": "Legal",\n "salary": 3000\n}')
+            Row(MAP='{\n "department": "Legal",\n "salary": 3000\n}'),
         ],
         sort=False,
     )

     # Case 6: create_map with a nested map
     Utils.check_answer(
-        df.select(create_map(col("department"), create_map(lit("salary"), col("salary"))).alias("map")),
+        df.select(
+            create_map(
+                col("department"), create_map(lit("salary"), col("salary"))
+            ).alias("map")
+        ),
         [
             Row(MAP='{\n "Sales": {\n "salary": 6500\n }\n}'),
-            Row(MAP='{\n "Legal": {\n "salary": 3000\n }\n}')
+            Row(MAP='{\n "Legal": {\n "salary": 3000\n }\n}'),
         ],
         sort=False,
     )
@@ -1712,19 +1718,24 @@ def test_create_map(session):
     # Case 7: create_map with None values
     Utils.check_answer(
         df.select(create_map("department", "location").alias("map")),
-        [
-            Row(MAP='{\n "Sales": "USA"\n}'),
-            Row(MAP='{\n "Legal": null\n}')
-        ],
+        [Row(MAP='{\n "Sales": "USA"\n}'), Row(MAP='{\n "Legal": null\n}')],
         sort=False,
     )

     # Case 8: create_map dynamic creation
     Utils.check_answer(
-        df.select(create_map(list(chain(*((lit(name), col(name)) for name in df.columns)))).alias("map")),
+        df.select(
+            create_map(
+                list(chain(*((lit(name), col(name)) for name in df.columns)))
+            ).alias("map")
+        ),
         [
-            Row(MAP='{\n "DEPARTMENT": "Sales",\n "LOCATION": "USA",\n "SALARY": 6500\n}'),
-            Row(MAP='{\n "DEPARTMENT": "Legal",\n "LOCATION": null,\n "SALARY": 3000\n}')
+            Row(
+                MAP='{\n "DEPARTMENT": "Sales",\n "LOCATION": "USA",\n "SALARY": 6500\n}'
+            ),
+            Row(
+                MAP='{\n "DEPARTMENT": "Legal",\n "LOCATION": null,\n "SALARY": 3000\n}'
+            ),
         ],
         sort=False,
     )
@@ -1732,10 +1743,7 @@ def test_create_map(session):
     # Case 9: create_map without columns
     Utils.check_answer(
         df.select(create_map().alias("map")),
-        [
-            Row(MAP='{}'),
-            Row(MAP='{}')
-        ],
+        [Row(MAP="{}"), Row(MAP="{}")],
Row(MAP="{}")], sort=False, ) @@ -1743,15 +1751,21 @@ def test_create_map(session): def test_create_map_negative(session): df = session.create_dataframe( [("Sales", 6500, "USA"), ("Legal", 3000, None)], - ("department", "salary", "location") + ("department", "salary", "location"), ) # Case 1: create_map with odd number of columns with pytest.raises(ValueError) as ex_info: df.select(create_map("department").alias("map")) - assert "The 'create_map' function requires an even number of parameters but the actual number is 1" in str(ex_info) + assert ( + "The 'create_map' function requires an even number of parameters but the actual number is 1" + in str(ex_info) + ) # Case 2: create_map with odd number of columns (list) with pytest.raises(ValueError) as ex_info: df.select(create_map([df.department, df.salary, df.location]).alias("map")) - assert "The 'create_map' function requires an even number of parameters but the actual number is 3" in str(ex_info) + assert ( + "The 'create_map' function requires an even number of parameters but the actual number is 3" + in str(ex_info) + )