Skip to content

Commit

Permalink
Allow processing of duplicated data columns
Browse files Browse the repository at this point in the history
  • Loading branch information
olejandro committed Feb 28, 2024
1 parent eac45c0 commit fab9f98
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 2 deletions.
1 change: 1 addition & 0 deletions xl2times/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -456,6 +456,7 @@ def process_flexible_import_table(
known_columns = config.known_columns[datatypes.Tag.fi_t]
data_columns = [x for x in df.columns if x not in known_columns]

# TODO: Replace this with something similar to known columns from config
# Populate index columns
index_columns = [
"region",
Expand Down
10 changes: 8 additions & 2 deletions xl2times/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,14 @@ def explode(df, data_columns):
:return: Tuple with the exploded dataframe and a Series of the original
column name for each value in each new row.
"""
data = df[data_columns].values.tolist()
# Handle duplicate columns (https://pandas.pydata.org/docs/user_guide/duplicates.html)
if len(set(data_columns)) < len(data_columns):
cols = df.columns.to_list()
data_cols_idx = [idx for idx, val in enumerate(cols) if val in data_columns]
data = df.iloc[:, data_cols_idx].values.tolist()
else:
data = df[data_columns].values.tolist()

other_columns = [
colname for colname in df.columns.values if colname not in data_columns
]
Expand All @@ -69,7 +76,6 @@ def explode(df, data_columns):
df = df.assign(value=data)
nrows = df.shape[0]
df = df.explode(value_column, ignore_index=True)

names = pd.Series(data_columns * nrows, index=df.index, dtype=str)
# Remove rows with no VALUE
index = df[value_column].notna()
Expand Down

0 comments on commit fab9f98

Please sign in to comment.