[IR] Steel-thread example for pandas iloc get #1727

Closed
61 changes: 60 additions & 1 deletion src/snowflake/snowpark/_internal/ast.py
@@ -8,9 +8,14 @@
import json
import sys
import uuid
from typing import Any, Sequence, Tuple
from decimal import Decimal
from inspect import signature
from typing import Any, Callable, Sequence, Tuple

from google.protobuf.json_format import ParseDict
from numpy import datetime64, float64, int32, int64
from pandas import Timestamp
from pandas.core.dtypes.inference import is_list_like

import snowflake.snowpark._internal.proto.ast_pb2 as proto
from snowflake.connector.arrow_context import ArrowConverterContext
@@ -28,6 +33,60 @@ def expr_to_dataframe_expr(expr):
    return dfe


# Map from Python type to its corresponding IR entity. The entities below all have the 'v' attribute.
TYPE_TO_IR_TYPE_NAME = {
    bytes: "binary_val",
    bool: "bool_val",
    datetime64: "date_val",
    Decimal: "big_decimal_val",
    float64: "float_64_val",
    int32: "int_32_val",
    int64: "int_64_val",
    str: "string_val",
    Timestamp: "timestamp_val",
}
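
A quick aside on why the keys are numpy/pandas scalar types rather than Python builtins: scalar values read back out of pandas objects are typically numpy scalars, so `type(val)` yields `numpy.int64` and friends. A small check (assuming numpy and pandas are installed; not part of the diff):

```python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])
v = s[0]
print(type(v))              # <class 'numpy.int64'>, not <class 'int'>
print(type(v) is np.int64)  # True, so int64 is the key that matches here
```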


def ast_expr_from_python_val(expr, val):
    """
    Converts a Python value to an IR expression.
    The resulting IR expression is set as an attribute of `expr`.

    Parameters
    ----------
    expr : IR entity protobuf builder
    val : Python value that needs to be converted to an IR expression.
    """
    if val is None:
        expr.none_val = val
        return  # nothing further to encode for None
    val_type = type(val)
    if val_type not in TYPE_TO_IR_TYPE_NAME:
        # Modin is imported here to prevent circular import issues.
        from snowflake.snowpark.modin.pandas import DataFrame, Series

        if isinstance(val, Callable):
            for item in signature(val).parameters:
                item_expr = expr.fn_val.params.add()
                item_expr.v = item
            expr.fn_val.body = val
        elif isinstance(val, slice):
            expr.slice_val.start.v = val.start
            expr.slice_val.stop.v = val.stop
            expr.slice_val.step.v = val.step
        elif not isinstance(val, Series) and is_list_like(val):
            # Check that val is not a Series, since Series objects are also list-like.
            for item in val:
                item_expr = expr.list_val.add()
                item_expr.v = item
        elif isinstance(val, Series):
            expr.series_val.ref = val
        elif isinstance(val, DataFrame):
            expr.series_val.ref = val
    else:
        ir_type_name = TYPE_TO_IR_TYPE_NAME[val_type]
        setattr(getattr(expr, ir_type_name), "v", val)  # noqa: B010
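
For intuition, here is a standalone sketch of the scalar branch above, with `types.SimpleNamespace` standing in for the protobuf builder (the names here are illustrative only; the real code writes into generated proto messages):

```python
from types import SimpleNamespace

# Simplified stand-in for TYPE_TO_IR_TYPE_NAME, keyed on Python builtins.
SKETCH_TYPE_MAP = {
    bool: "bool_val",
    float: "float_64_val",
    int: "int_64_val",
    str: "string_val",
}

def set_scalar(expr, val):
    # Look up the IR field for this value's type and set its 'v' attribute,
    # mirroring setattr(getattr(expr, ir_type_name), "v", val) above.
    field = SKETCH_TYPE_MAP[type(val)]
    setattr(expr, field, SimpleNamespace(v=val))

expr = SimpleNamespace()
set_scalar(expr, 42)
print(expr.int_64_val.v)  # -> 42
```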


def check_response(response: Any) -> None:
    # TODO SNOW-1474659: Add logic here to check whether response is a valid result,
    # else raise client-compatible exceptions.
1,179 changes: 595 additions & 584 deletions src/snowflake/snowpark/_internal/proto/ast_pb2.py

Large diffs are not rendered by default.

40 changes: 25 additions & 15 deletions src/snowflake/snowpark/column.py
@@ -4,7 +4,7 @@
#

import sys
from typing import Any, Dict, Optional, Union
from typing import Optional, Union

import snowflake.snowpark
import snowflake.snowpark._internal.proto.ast_pb2 as proto
@@ -624,20 +624,20 @@ def __invert__(self) -> "Column":
    def _cast(self, to: Union[str, DataType], try_: bool = False) -> "Column":
        if isinstance(to, str):
            to = type_string_to_type_object(to)
        if try_:
            ast = Column._create_ast(
                property="sp_column_try_cast",
                copy_messages={"col": self._ast},
            )
            to._fill_ast(ast.sp_column_try_cast.to)
        else:
            ast = Column._create_ast(
                property="sp_column_cast",
                copy_messages={"col": self._ast},
            )
            to._fill_ast(ast.sp_column_cast.to)

        return Column(Cast(self._expression, to, try_), ast=ast)
        # if try_:
        #     ast = Column._create_ast(
        #         property="sp_column_try_cast",
        #         copy_messages={"col": self._ast},
        #     )
        #     to._fill_ast(ast.sp_column_try_cast.to)
        # else:
        #     ast = Column._create_ast(
        #         property="sp_column_cast",
        #         copy_messages={"col": self._ast},
        #     )
        #     to._fill_ast(ast.sp_column_cast.to)

        return Column(Cast(self._expression, to, try_), ast=None)

    def cast(self, to: Union[str, DataType]) -> "Column":
        """Casts the value of the Column to the specified data type.
@@ -989,6 +989,16 @@ def _create_ast(
        Returns:
            proto.SpColumnExpr: Returns fully populated SpColumnExpr AST from given arguments
        """
        return None
        # Avoid using mutable values as default values in the function signature.
        if fill_expr_asts is None:
            fill_expr_asts = {}
        if copy_messages is None:
            copy_messages = {}
        if assign_opt_fields is None:
            assign_opt_fields = {}
        if assign_fields is None:
            assign_fields = {}

        ast = proto.Expr()
        if property is not None:
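
The `is None` guards above exist because of a well-known Python pitfall: a mutable default value is evaluated once, at function definition time, and then shared across all calls. A minimal demonstration:

```python
def bad_append(item, acc=[]):  # the default list is created once and shared
    acc.append(item)
    return acc

def good_append(item, acc=None):  # the pattern used in _create_ast
    if acc is None:
        acc = []  # fresh list on every call
    acc.append(item)
    return acc

print(bad_append(1), bad_append(2))    # [1, 2] [1, 2] -- state leaks across calls
print(good_append(1), good_append(2))  # [1] [2]
```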
2 changes: 1 addition & 1 deletion src/snowflake/snowpark/dataframe.py
@@ -90,8 +90,8 @@
    decode_ast_response_from_snowpark,
)
from snowflake.snowpark._internal.ast_utils import (
    set_src_position,
    get_symbol,
    set_src_position,
    setattr_if_not_none,
)
from snowflake.snowpark._internal.error_message import SnowparkClientExceptionMessages
6 changes: 3 additions & 3 deletions src/snowflake/snowpark/functions.py
@@ -250,11 +250,11 @@ def col(df_alias: str, col_name: str) -> Column:
    ... # pragma: no cover


def col(name1: str, name2: Optional[str] = None) -> Column:
def col(name1: str, name2: Optional[str] = None, ast=None) -> Column:
    if name2 is None:
        return Column(name1)
        return Column(name1, ast=ast)
    else:
        return Column(name1, name2)
        return Column(name1, name2, ast=ast)


@overload
14 changes: 13 additions & 1 deletion src/snowflake/snowpark/modin/pandas/dataframe.py
@@ -150,6 +150,7 @@ def __init__(
        dtype=None,
        copy=None,
        query_compiler=None,
        ast_stmt=None,
    ) -> None:
        # TODO: SNOW-1063346: Modin upgrade - modin.pandas.DataFrame functions
        # Siblings are other dataframes that share the same query compiler. We
@@ -158,6 +159,11 @@

        self._siblings = []

        if not ast_stmt:

Collaborator: In column.py we're using `if ... is None`; we should use a consistent pattern for this check – whatever is the canonical approach in Python. It seems that `is None` makes more sense?

            ast_stmt = pd.session._ast_batch.assign()

Collaborator: I don't really understand the point of creating an assign node like this. If a statement isn't actually supplied, how would a "default" one really work? Is this because of the way that Pandas interacts with this code?

        self._ast_id = ast_stmt.var_id.bitfield1
        self._ast_stmt = ast_stmt

        # Engine.subscribe(_update_engine)
        if isinstance(data, (DataFrame, Series)):
            self._query_compiler = data._query_compiler.copy()
@@ -275,6 +281,9 @@ def __repr__(self):
        ) = self._query_compiler.build_repr_df(num_rows, num_cols, "x")
        result = repr(repr_df)

        _, ast = pd.session._pd_ast_batch.flush()
        print(ast)  # noqa: T201

        # if truncated, add shape information
        if is_repr_truncated(row_count, col_count, num_rows, num_cols):
            # The split here is so that we don't repr pandas row lengths.
@@ -2864,7 +2873,10 @@ def __setattr__(self, key, value):
        # - `_siblings`, which Modin initializes before it appears in __dict__
        # - `_cache`, which pandas.cache_readonly uses to cache properties
        #   before it appears in __dict__.
        if key in ("_query_compiler", "_siblings", "_cache") or key in self.__dict__:
        if (
            key in ("_query_compiler", "_siblings", "_cache", "_ast_id", "_ast_stmt")
            or key in self.__dict__
        ):
            pass
        elif key in self and key not in dir(self):
            self.__setitem__(key, value)
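
Why `_ast_id` and `_ast_stmt` must be added to that allow-list: this `__setattr__` treats unknown attribute writes as column assignments, so internal bookkeeping attributes set during `__init__` have to be let through before they exist in `__dict__`. A toy version of the pattern (hypothetical class, for illustration only):

```python
class Frame:
    _internal = ("_data", "_ast_id")

    def __init__(self):
        object.__setattr__(self, "_data", {})

    def __setattr__(self, key, value):
        # Let internal bookkeeping attributes through untouched ...
        if key in self._internal or key in self.__dict__:
            object.__setattr__(self, key, value)
        else:
            # ... and route everything else to "column" storage.
            self._data[key] = value

f = Frame()
f._ast_id = 7        # stored as a plain attribute
f.price = [1, 2, 3]  # treated as a column write
print(f._ast_id, f._data)  # 7 {'price': [1, 2, 3]}
```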
18 changes: 16 additions & 2 deletions src/snowflake/snowpark/modin/pandas/indexing.py
@@ -58,6 +58,7 @@

import snowflake.snowpark.modin.pandas as pd
import snowflake.snowpark.modin.pandas.utils as frontend_utils
from snowflake.snowpark._internal.ast import ast_expr_from_python_val
from snowflake.snowpark.modin.pandas.base import BasePandasDataset
from snowflake.snowpark.modin.pandas.dataframe import DataFrame
from snowflake.snowpark.modin.pandas.series import (
@@ -1049,6 +1050,15 @@ def __getitem__(

        original_row_loc = row_loc  # keep a copy for error message

        # IR changes! Add iloc get nodes to the AST.
        stmt = pd.session._pd_ast_batch.assign()
        ast = stmt.expr
        ast.pd_dataframe_i_loc.df.var_id.bitfield1 = self.df._ast_id
        # Map Python built-ins (functions, scalars, lists, slices, etc.) to AST exprs and emit Ref nodes for dataframes,
        # series, and indexes.
        ast_expr_from_python_val(ast.pd_dataframe_i_loc.rows, row_loc)
        ast_expr_from_python_val(ast.pd_dataframe_i_loc.columns, col_loc)

        # Convert range to slice objects.
        if not isinstance(row_loc, pd.Series) and is_range_like(row_loc):
            row_loc = self._convert_range_to_valid_slice(row_loc)
@@ -1057,7 +1067,7 @@

        # Convert all scalar, list-like, and indexer row_loc to a Series object to get a query compiler object.
        if is_scalar(row_loc):
            row_loc = pd.Series([row_loc])
            row_loc = pd.Series([row_loc], ast_stmt=stmt)
        elif is_list_like(row_loc):
            if hasattr(row_loc, "dtype"):
                dtype = row_loc.dtype
@@ -1066,7 +1076,7 @@
                dtype = float
            else:
                dtype = None
            row_loc = pd.Series(row_loc, dtype=dtype)
            row_loc = pd.Series(row_loc, dtype=dtype, ast_stmt=stmt)

        # Check whether the row and column input is of numeric dtype.
        self._validate_numeric_get_key_values(row_loc, original_row_loc)
@@ -1089,6 +1099,10 @@
        if isinstance(result, Series):
            result._parent = self.df
            result._parent_axis = 0

        _, ast = pd.session._ast_batch.flush()
        print(ast)  # noqa: T201

        return result

    def _get_pandas_object_from_qc_view(
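
Taken together, the hook above records one assign statement per iloc access: a `pd_dataframe_i_loc` expression holding a Ref to the source frame plus the converted row and column locators. Roughly, with plain dicts standing in for the protobuf messages (field names taken from the diff; everything else is illustrative):

```python
def build_iloc_get_node(df_ast_id, rows_expr, cols_expr):
    # Mirrors: stmt.expr.pd_dataframe_i_loc, with a Ref to the source frame
    # plus the row/column locators converted by ast_expr_from_python_val.
    return {
        "pd_dataframe_i_loc": {
            "df": {"var_id": df_ast_id},
            "rows": rows_expr,
            "columns": cols_expr,
        }
    }

# For df.iloc[2, 2] from the test case below, rows/columns are int scalars:
print(build_iloc_get_node(1, {"int_64_val": {"v": 2}}, {"int_64_val": {"v": 2}}))
```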
14 changes: 13 additions & 1 deletion src/snowflake/snowpark/modin/pandas/series.py
@@ -121,6 +121,7 @@ def __init__(
        copy=False,
        fastpath=False,
        query_compiler=None,
        ast_stmt=None,
    ) -> None:
        # TODO: SNOW-1063347: Modin upgrade - modin.pandas.Series functions
        # Siblings are other dataframes that share the same query compiler. We
@@ -130,6 +131,11 @@
        # modified:
        # Engine.subscribe(_update_engine)

        if not ast_stmt:
            ast_stmt = pd.session._ast_batch.assign()
        self._ast_id = ast_stmt.var_id.bitfield1
        self._ast_stmt = ast_stmt

        if isinstance(data, type(self)):
            query_compiler = data._query_compiler.copy()
            if index is not None:
@@ -196,7 +202,9 @@ def _set_name(self, name):
        else:
            columns = [name]
        self._update_inplace(
            new_query_compiler=self._query_compiler.set_columns(columns)
            new_query_compiler=self._query_compiler.set_columns(
                columns, ast_stmt=self._ast_stmt
            )
        )

    name = property(_get_name, _set_name)
@@ -492,6 +500,10 @@ def __repr__(self):
            if temp_df.empty
            else temp_str.rsplit("dtype: ", 1)[-1]
        )

        _, ast = pd.session._ast_batch.flush()
        print(ast)  # noqa: T201

        if row_count == 0:
            return f"Series([], {freq_str}{name_str}{dtype_str}"
        maxsplit = 1
8 changes: 6 additions & 2 deletions src/snowflake/snowpark/modin/plugin/_internal/frame.py
@@ -11,6 +11,7 @@
from pandas._typing import IndexLabel
from pandas.core.dtypes.common import is_object_dtype

import snowflake.snowpark._internal.proto.ast_pb2 as proto
from snowflake.snowpark._internal.analyzer.analyzer_utils import (
    quote_name_without_upper_casing,
)
@@ -802,7 +803,9 @@ def project_columns(
        )

    def rename_snowflake_identifiers(
        self, old_to_new_identifiers: dict[str, str]
        self,
        old_to_new_identifiers: dict[str, str],
        ast_stmt: Optional[proto.Expr] = None,
    ) -> "InternalFrame":
        """
        Rename columns for underlying ordered dataframe.
@@ -847,7 +850,8 @@
                # retain the original column
                select_list.append(old_id)
            else:
                select_list.append(col(old_id).as_(new_id))
                ast = ast_stmt.expr if ast_stmt is not None else None
                select_list.append(col(old_id, ast=ast).as_(new_id))
            # if the old column is part of the ordering or row position columns, retain the column
            # as part of the projected columns.
            if old_id in ordering_and_row_position_columns:
@@ -59,6 +59,7 @@
from pandas.io.formats.format import format_percentiles
from pandas.io.formats.printing import PrettyDict

import snowflake.snowpark._internal.proto.ast_pb2 as proto
import snowflake.snowpark.modin.pandas as pd
from snowflake.snowpark._internal.analyzer.analyzer_utils import (
    quote_name_without_upper_casing,
@@ -1250,7 +1251,11 @@ def columns(self) -> "pd.Index":
        # TODO SNOW-837664: add more tests for df.columns
        return self._modin_frame.data_columns_index

    def set_columns(self, new_pandas_labels: Axes) -> "SnowflakeQueryCompiler":
    def set_columns(
        self,
        new_pandas_labels: Axes,
        ast_stmt: Optional[proto.Expr] = None,
    ) -> "SnowflakeQueryCompiler":
        """
        Set pandas column labels with the new column labels

@@ -1288,7 +1293,8 @@ def set_columns(self, new_pandas_labels: Axes) -> "SnowflakeQueryCompiler":
        )

        renamed_frame = self._modin_frame.rename_snowflake_identifiers(
            renamed_quoted_identifier_mapping
            renamed_quoted_identifier_mapping,
            ast_stmt=ast_stmt,
        )

        new_internal_frame = InternalFrame.create(
1 change: 1 addition & 0 deletions src/snowflake/snowpark/session.py
@@ -509,6 +509,7 @@ def __init__(
        self._conn.create_coprocessor()

        self._ast_batch = AstBatch(self)
        self._pd_ast_batch = AstBatch(self)

        _logger.info("Snowpark Session information: %s", self._session_info)
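
One plausible reading of the separate batch (an inference from this diff, not documented behavior): giving the pandas plugin its own `AstBatch` means that flushing pandas IR, e.g. from `DataFrame.__repr__`, does not drain statements queued by the core Snowpark API. In miniature:

```python
class Batch:
    """Tiny stand-in for AstBatch: collect statements, hand them over on flush."""
    def __init__(self):
        self._stmts = []

    def assign(self):
        self._stmts.append(f"stmt-{len(self._stmts)}")
        return self._stmts[-1]

    def flush(self):
        stmts, self._stmts = self._stmts, []
        return stmts

core, pandas_ir = Batch(), Batch()  # _ast_batch and _pd_ast_batch
core.assign()
pandas_ir.assign()
print(pandas_ir.flush())  # ['stmt-0'] -- only the pandas-side statements
print(core.flush())       # ['stmt-0'] -- the core batch was left intact
```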
2 changes: 1 addition & 1 deletion tests/ast/README.md
@@ -27,7 +27,7 @@ pytest --update-expectations tests/ast
For these tests to work, the Unparser must be built in the monorepo:
```bash
cd my-monorepo-path
cd Snowflake/unparser
cd Snowflake/trunk/Snowpark/unparser
sbt assembly
```

18 changes: 18 additions & 0 deletions tests/ast/data/pd-iloc.test
@@ -0,0 +1,18 @@
## TEST CASE

import modin.pandas as pd
import snowflake.snowpark.modin.plugin
from snowflake.snowpark import Session

session = Session.builder.create()
df = pd.DataFrame(
[[1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14], [15, 16, 17, 18, 19, 20, 21]],
columns=["A", "B", "C", "D", "E", "F", "G"],
)
result = df.iloc[2, 2]
print(result)

## EXPECTED OUTPUT

CgIIKhIICgYKBAgDEAsaCgoICAESAggBIgAafAp6CAISAggCGnDiB20KawphL1VzZXJzL3ZidWRhdGkvYW5hY29uZGEzL2VudnMvc25vdy9saWIvcHl0aG9uMy4xMS9zaXRlLXBhY2thZ2VzL21vZGluL2xvZ2dpbmcvbG9nZ2VyX2RlY29yYXRvci5weRB9GBcgfSgrIgAaCgoICAMSAggDIgAafAp6CAQSAggEGnDiB20KawphL1VzZXJzL3ZidWRhdGkvYW5hY29uZGEzL2VudnMvc25vdy9saWIvcHl0aG9uMy4xMS9zaXRlLXBhY2thZ2VzL21vZGluL2xvZ2dpbmcvbG9nZ2VyX2RlY29yYXRvci5weRB9GBcgfSgrIgAaCgoICAUSAggFIgAaxwEKxAEIBhICCAYauQHiB7UBCmsKYS9Vc2Vycy92YnVkYXRpL2FuYWNvbmRhMy9lbnZzL3Nub3cvbGliL3B5dGhvbjMuMTEvc2l0ZS1wYWNrYWdlcy9tb2Rpbi9sb2dnaW5nL2xvZ2dlcl9kZWNvcmF0b3IucHkQfRgXIH0oKxIH2gEEEgIIBBoQmgQNEgsiX19pbmRleF9fIhoSmgQPEg0iX19yZWR1Y2VkX18iGheaBBQSEiJfX3Jvd19wb3NpdGlvbl9fIiIAGvkBCvYBCAcSAggHGusB4gfnAQprCmEvVXNlcnMvdmJ1ZGF0aS9hbmFjb25kYTMvZW52cy9zbm93L2xpYi9weXRob24zLjExL3NpdGUtcGFja2FnZXMvbW9kaW4vbG9nZ2luZy9sb2dnZXJfZGVjb3JhdG9yLnB5EH0YFyB9KCsSB9oBBBICCAIaEJoEDRILIl9faW5kZXhfXyIaCJoEBRIDIkEiGgiaBAUSAyJCIhoImgQFEgMiQyIaCJoEBRIDIkQiGgiaBAUSAyJFIhoImgQFEgMiRiIaCJoEBRIDIkciGheaBBQSEiJfX3Jvd19wb3NpdGlvbl9fIiIAGo8CCowCCAgSAggIGoEC4gf9AQprCmEvVXNlcnMvdmJ1ZGF0aS9hbmFjb25kYTMvZW52cy9zbm93L2xpYi9weXRob24zLjExL3NpdGUtcGFja2FnZXMvbW9kaW4vbG9nZ2luZy9sb2dnZXJfZGVjb3JhdG9yLnB5EH0YFyB9KCsSB9oBBBICCAcaEJoEDRILIl9faW5kZXhfXyIaCJoEBRIDIkEiGgiaBAUSAyJCIhoImgQFEgMiQyIaCJoEBRIDIkQiGgiaBAUSAyJFIhoImgQFEgMiRiIaCJoEBRIDIkciGheaBBQSEiJfX3Jvd19wb3NpdGlvbl9fIhoUmgQREg8iX19yb3dfY291bnRfXyIiABqGAgqDAggJEgIICRr4AeIH9AEKawphL1VzZXJzL3ZidWRhdGkvYW5hY29uZGEzL2VudnMvc25vdy9saWIvcHl0aG9uMy4xMS9zaXRlLXBhY2thZ2VzL21vZGluL2xvZ2dpbmcvbG9nZ2VyX2RlY29yYXRvci5weRB9GBcgfSgrGhCaBA0SCyJfX2luZGV4X18iGgiaBAUSAyJBIhoImgQFEgMiQiIaCJoEBRIDIkMiGgiaBAUSAyJEIhoImgQFEgMiRSIaCJoEBRIDIkYiGgiaBAUSAyJHIhoXmgQUEhIiX19yb3dfcG9zaXRpb25fXyIaFJoEERIPIl9fcm93X2NvdW50X18iIgAarQIKqgIIChICCAoanwLiB5sCCmsKYS9Vc2Vycy92YnVkYXRpL2FuYWNvbmRhMy9lbnZzL3Nub3cvbGliL3B5dGhvbjMuMTEvc2l0ZS1wYWNrYWdlcy9tb2Rpbi9sb2dnaW5nL2xvZ2dlcl9kZWNvcmF0b3IucHkQfRgXIH0oKxIH2gEEEgIICRoQmgQNEgsiX19pbmRleF9fIhoImgQFEgMiQSIaCJoEBRIDIkIiGgiaBAUSAyJDIhoImgQFEgMiRCIaCJoEBRIDIkUiGgiaBAUSAyJGIhoImgQFEgMiRyIaF5oEFBISIl9fcm93X3Bvc2l0aW9uX18iGhSaBBESDyJfX3Jvd19jb3VudF9fIhocmgQZEhciX19yb3dfcG9zaXRpb25fejVpc19fIiIAGswBCskBCAsSAggLGr4B4ge6AQprCmEvVXNlcnMvdmJ1ZGF0aS9hbmFjb25kYTMvZW52cy9zbm93L2xpYi9weXRob24zLjExL3NpdGUtcGFja2FnZXMvbW9kaW4vbG9nZ2luZy9sb2dnZXJfZGVjb3JhdG9yLnB5EH0YFyB9KCsSB9oBBBICCAYaEJoEDRILIl9faW5kZXhfXyIaF5oEFBISIl9fcmVkdWNlZF9fX29vczgiGheaBBQSEiJfX3Jvd19wb3NpdGlvbl9fIiIAGtoBCtcBCAwSAggMGswB4gfIAQprCmEvVXNlcnMvdmJ1ZGF0aS9hbmFjb25kYTMvZW52cy9zbm93L2xpYi9weXRob24zLjExL3NpdGUtcGFja2FnZXMvbW9kaW4vbG9nZ2luZy9sb2dnZXJfZGVjb3JhdG9yLnB5EH0YFyB9KCsSB9oBBBICCAoaHJoEGRIXIl9fcm93X3Bvc2l0aW9uX3o1aXNfXyIaFJoEERIPIl9fcm93X2NvdW50X18iGhyaBBkSFyJfX3Jvd19wb3NpdGlvbl9fX3A2YWIiIgAahgEKgwEIDRICCA0aeeIHdgprCmEvVXNlcnMvdmJ1ZGF0aS9hbmFjb25kYTMvZW52cy9zbm93L2xpYi9weXRob24zLjExL3NpdGUtcGFja2FnZXMvbW9kaW4vbG9nZ2luZy9sb2dnZXJfZGVjb3JhdG9yLnB5EH0YFyB9KCsSB9oBBBICCAsiABqGAQqDAQgOEgIIDhp54gd2CmsKYS9Vc2Vycy92YnVkYXRpL2FuYWNvbmRhMy9lbnZzL3Nub3cvbGliL3B5dGhvbjMuMTEvc2l0ZS1wYWNrYWdlcy9tb2Rpbi9sb2dnaW5nL2xvZ2dlcl9kZWNvcmF0b3IucHkQfRgXIH0oKxIH2gEEEgIIDCIAGpUCCpICCA8SAggPGocC4geDAgprCmEvVXNlcnMvdmJ1ZGF0aS9hbmFjb25kYTMvZW52cy9zbm93L2xpYi9weXRob24zLjExL3NpdGUtcGFja2FnZXMvbW9kaW4vbG9nZ2luZy9sb2dnZXJfZGVjb3JhdG9yLnB5EH0YFyB9KCsaEJoEDRILIl9faW5kZXhfXyIaF5oEFBISIl9fcmVkdWNlZF9fX29vczgiGheaBBQSEiJfX3Jvd19wb3NpdGlvbl9fIhocmgQZEhciX19yb3dfcG9zaXRpb25fejVpc19fIhoUmgQREg8iX19yb3dfY291bnRfXyIaHJoEGRIXIl9fcm93X3Bvc2l0aW9uX19fcDZhYiIiABqPAgqMAggQEgIIEBqBAuIH/QEKawphL1VzZXJzL3ZidWRhdGkvYW5hY29uZGEzL2VudnMvc25vdy9saWIvcHl0aG9uMy4xMS9zaXRlLXBhY2thZ2V
zL21vZGluL2xvZ2dpbmcvbG9nZ2VyX2RlY29yYXRvci5weRB9GBcgfSgrEgfaAQQSAggHGhCaBA0SCyJfX2luZGV4X18iGgiaBAUSAyJBIhoImgQFEgMiQiIaCJoEBRIDIkMiGgiaBAUSAyJEIhoImgQFEgMiRSIaCJoEBRIDIkYiGgiaBAUSAyJHIhoXmgQUEhIiX19yb3dfcG9zaXRpb25fXyIaFJoEERIPIl9fcm93X2NvdW50X18iIgAa5QEK4gEIERICCBEa1wHiB9MBCmsKYS9Vc2Vycy92YnVkYXRpL2FuYWNvbmRhMy9lbnZzL3Nub3cvbGliL3B5dGhvbjMuMTEvc2l0ZS1wYWNrYWdlcy9tb2Rpbi9sb2dnaW5nL2xvZ2dlcl9kZWNvcmF0b3IucHkQfRgXIH0oKxIH2gEEEgIIDxoQmgQNEgsiX19pbmRleF9fIhoSmgQPEg0iX19yZWR1Y2VkX18iGheaBBQSEiJfX3Jvd19wb3NpdGlvbl9fIhocmgQZEhciX19yb3dfcG9zaXRpb25fX19wNmFiIiIAGoMCCoACCBISAggSGvUB4gfxAQprCmEvVXNlcnMvdmJ1ZGF0aS9hbmFjb25kYTMvZW52cy9zbm93L2xpYi9weXRob24zLjExL3NpdGUtcGFja2FnZXMvbW9kaW4vbG9nZ2luZy9sb2dnZXJfZGVjb3JhdG9yLnB5EH0YFyB9KCsSB9oBBBICCBAaFZoEEhIQIl9faW5kZXhfX18wMTlvIhoImgQFEgMiQSIaCJoEBRIDIkIiGgiaBAUSAyJDIhoImgQFEgMiRCIaCJoEBRIDIkUiGgiaBAUSAyJGIhoImgQFEgMiRyIaHJoEGRIXIl9fcm93X3Bvc2l0aW9uX19fNWw2YSIiABqGAQqDAQgTEgIIExp54gd2CmsKYS9Vc2Vycy92YnVkYXRpL2FuYWNvbmRhMy9lbnZzL3Nub3cvbGliL3B5dGhvbjMuMTEvc2l0ZS1wYWNrYWdlcy9tb2Rpbi9sb2dnaW5nL2xvZ2dlcl9kZWNvcmF0b3IucHkQfRgXIH0oKxIH2gEEEgIIESIAGoYBCoMBCBQSAggUGnniB3YKawphL1VzZXJzL3ZidWRhdGkvYW5hY29uZGEzL2VudnMvc25vdy9saWIvcHl0aG9uMy4xMS9zaXRlLXBhY2thZ2VzL21vZGluL2xvZ2dpbmcvbG9nZ2VyX2RlY29yYXRvci5weRB9GBcgfSgrEgfaAQQSAggSIgAaCgoICBUSAggVIgAaoAEKnQEIFhICCBYakgHiB44BCmsKYS9Vc2Vycy92YnVkYXRpL2FuYWNvbmRhMy9lbnZzL3Nub3cvbGliL3B5dGhvbjMuMTEvc2l0ZS1wYWNrYWdlcy9tb2Rpbi9sb2dnaW5nL2xvZ2dlcl9kZWNvcmF0b3IucHkQfRgXIH0oKxoVmgQSEhAiX19pbmRleF9fXzAxOW8iGgiaBAUSAyJDIiIA
17
Collaborator: Does this work? The test driver here runs the unparser, so we should be seeing something that looks like the original source.
