
Commit

Merge pull request #275 from MannLabs/na_bugfix
fix for the zeros / NA discrepancy
boopthesnoot authored May 22, 2024
2 parents 89843be + 4eab905 commit dc517ed
Showing 4 changed files with 245 additions and 191 deletions.
173 changes: 100 additions & 73 deletions alphastats/DataSet_Preprocess.py
@@ -1,14 +1,15 @@
from random import random
import pandas as pd
import sklearn
import itertools
import logging

import numpy as np
import pandas as pd
import sklearn
import sklearn.ensemble
import sklearn.impute
from alphastats.utils import ignore_warning
from sklearn.experimental import enable_iterative_imputer
import itertools
import streamlit as st

from sklearn.experimental import enable_iterative_imputer #noqa
from alphastats.utils import ignore_warning


class Preprocess:
@@ -30,45 +31,58 @@ def preprocess_print_info(self):
print(pd.DataFrame(self.preprocessing_info.items()))

def _remove_na_values(self, cut_off):
if (
self.preprocessing_info.get("Missing values were removed")
and self.preprocessing_info.get("Data completeness cut-off") == cut_off
):
logging.info("Missing values have already been filtered.")
st.warning(
"Missing values have already been filtered. To apply another cutoff, reset preprocessing."
)
return
cut = 1 - cut_off
limit = self.mat.shape[0] * cut


num_samples, num_proteins = self.mat.shape
limit = num_samples * cut

self.mat.replace(0, np.nan, inplace=True)
keep_list = list()
invalid = 0
for column_name in self.mat.columns:
column = self.mat[column_name]
# Get the count of Zeros in column
count = (column == 0).sum()
count = column.isna().sum()
try:
count = count.item()
if isinstance(count, int):
if count < limit:
keep_list += [column_name]

except ValueError:
invalid +=1
invalid += 1
continue

self.mat= self.mat[keep_list]
self.mat = self.mat[keep_list]

self.preprocessing_info.update(
{"Data completeness cut-off": cut_off}
{
"Number of removed ProteinGroups due to data completeness cutoff": num_proteins
- self.mat.shape[1],
"Missing values were removed": True,
"Data completeness cut-off": cut_off,
}
)
percentage = cut_off * 100
print(f"Proteins with a data completeness across all samples of less than {percentage} % have been removed.")


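For context, the core of this fix is that protein-group completeness is now judged on NaN counts after zeros have been mapped to NaN, rather than on zero counts alone. A minimal standalone sketch of that filtering logic (toy variable names, not the DataSet API) could look like this:

# Sketch only: NaN-based completeness filter on a toy samples-by-proteins matrix.
import numpy as np
import pandas as pd

mat = pd.DataFrame(
    {"P1": [1.0, 0.0, 3.0], "P2": [0.0, 0.0, 0.0], "P3": [2.0, 2.5, np.nan]}
)
cut_off = 0.6                          # keep proteins quantified in >= 60% of samples
limit = mat.shape[0] * (1 - cut_off)

mat = mat.replace(0, np.nan)           # zeros are treated as missing values
keep = [col for col in mat.columns if mat[col].isna().sum() < limit]
mat = mat[keep]                        # P2 (entirely missing) is dropped; P1 and P3 survive
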
def _filter(self):
if len(self.filter_columns) == 0:
logging.info("No columns to filter.")
return

if self.preprocessing_info.get("Contaminations have been removed") == True:
if self.preprocessing_info.get("Contaminations have been removed"):
logging.info("Contaminatons have already been filtered.")
return

#  print column names with contamination
protein_groups_to_remove = self.rawinput[
(self.rawinput[self.filter_columns] == True).any(axis=1)
self.rawinput[self.filter_columns].any(axis=1)
][self.index_column].tolist()

protein_groups_to_remove = list(
@@ -105,15 +119,18 @@ def _imputation(self, method: str):
logging.info(
f" {len(protein_group_na)} Protein Groups were removed due to missing values."
)

logging.info("Imputing data...")

if method == "mean":
imp = sklearn.impute.SimpleImputer(missing_values=np.nan, strategy="mean", keep_empty_features=True)
imp = sklearn.impute.SimpleImputer(
missing_values=np.nan, strategy="mean", keep_empty_features=True
)
imputation_array = imp.fit_transform(self.mat.values)

elif method == "median":
imp = sklearn.impute.SimpleImputer(missing_values=np.nan, strategy="median", keep_empty_features=True)
imp = sklearn.impute.SimpleImputer(
missing_values=np.nan, strategy="median", keep_empty_features=True
)
imputation_array = imp.fit_transform(self.mat.values)

elif method == "knn":
@@ -123,25 +140,11 @@ def _imputation(self, method: str):
imputation_array = imp.fit_transform(self.mat.values)

elif method == "randomforest":
randomforest = sklearn.ensemble.RandomForestRegressor(
max_depth=10,
bootstrap=True,
max_samples=0.5,
n_jobs=2,
random_state=0,
verbose=0, #  random forest takes a while print progress
imp = sklearn.ensemble.HistGradientBoostingRegressor(
max_depth=10, max_iter=100, random_state=0
)
imp = sklearn.impute.IterativeImputer(
random_state=0, estimator=randomforest
)

# the random forest imputer doesnt work with float32/float16..
#  so the values are multiplied and converted to integers
array_multi_mio = self.mat.values * 1000000
array_int = array_multi_mio.astype("int")

imputation_array = imp.fit_transform(array_int)
imputation_array = imputation_array / 1000000
imp = sklearn.impute.IterativeImputer(random_state=0, estimator=imp)
imputation_array = imp.fit_transform(self.mat.values)

else:
raise ValueError(
@@ -155,26 +158,43 @@ def _normalization(self, method: str):
)
self.preprocessing_info.update({"Imputation": method})

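The random-forest imputation path above now wraps sklearn's HistGradientBoostingRegressor in an IterativeImputer, which accepts float input directly, so the old multiply-and-cast-to-integer workaround is no longer needed. A hedged, self-contained sketch of that pattern (toy data, not the DataSet API):

# Sketch only: iterative imputation with a gradient-boosting estimator.
import numpy as np
import pandas as pd
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
import sklearn.ensemble
import sklearn.impute

mat = pd.DataFrame({"P1": [1.0, np.nan, 3.0], "P2": [2.0, 2.5, np.nan]})
estimator = sklearn.ensemble.HistGradientBoostingRegressor(
    max_depth=10, max_iter=100, random_state=0
)
imputer = sklearn.impute.IterativeImputer(random_state=0, estimator=estimator)
imputed = pd.DataFrame(imputer.fit_transform(mat.values), columns=mat.columns)
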
def _linear_normalization(self, dataframe: pd.DataFrame):
"""Normalize data using l2 norm without breaking when encoutering nones
l2 = sqrt(sum(x**2))
Args:
dataframe (pd.DataFrame): dataframe to normalize
Returns:
np.array: normalized np.array
"""
square_sum_per_row = dataframe.pow(2).sum(axis=1, skipna=True)

l2_norms = np.sqrt(square_sum_per_row)
normalized_vals = dataframe.div(l2_norms.replace(0, 1), axis=0)
return normalized_vals.values

@ignore_warning(UserWarning)
@ignore_warning(RuntimeWarning)
def _normalization(self, method: str):

if method == "zscore":
scaler = sklearn.preprocessing.StandardScaler()
normalized_array = scaler.fit_transform(self.mat.values)
normalized_array = scaler.fit_transform(
self.mat.values.transpose()
).transpose()

elif method == "quantile":
qt = sklearn.preprocessing.QuantileTransformer(random_state=0)
normalized_array = qt.fit_transform(self.mat.values)
normalized_array = qt.fit_transform(self.mat.values.transpose()).transpose()

elif method == "linear":
normalized_array = sklearn.preprocessing.normalize(
self.mat.values, norm="l2"
)
normalized_array = self._linear_normalization(self.mat)

elif method == "vst":
scaler = sklearn.preprocessing.PowerTransformer(standardize=False)
normalized_array = scaler.fit_transform(self.mat.values)
minmax = sklearn.preprocessing.MinMaxScaler()
scaler = sklearn.preprocessing.PowerTransformer()
minmaxed_array = minmax.fit_transform(self.mat.values.transpose())
normalized_array = scaler.fit_transform(minmaxed_array).transpose()

else:
raise ValueError(
@@ -189,20 +209,19 @@ def _normalization(self, method: str):
self.preprocessing_info.update({"Normalization": method})

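Note that z-score and quantile normalization are now applied per sample rather than per protein: the matrix is transposed before the scaler is fitted and transposed back afterwards. A small illustration of the idea with a hypothetical samples-by-proteins array:

# Sketch only: scale each sample (row) instead of each protein (column).
import numpy as np
import sklearn.preprocessing

mat = np.array([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])  # 2 samples x 3 proteins
scaler = sklearn.preprocessing.StandardScaler()
per_sample = scaler.fit_transform(mat.transpose()).transpose()
# each row now has mean ~0 and unit variance across its own proteins
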
def reset_preprocessing(self):
""" Reset all preprocessing steps
"""
#  reset all preprocessing steps
"""Reset all preprocessing steps"""
self.create_matrix()
print("All preprocessing steps are reset.")

@ignore_warning(RuntimeWarning)
def _compare_preprocessing_modes(self, func, params_for_func) -> list:
dataset = self
imputation_methods = ["mean", "median", "knn", "randomforest"]
normalization_methods = ["vst","zscore", "quantile" ]

preprocessing_modes = list(itertools.product(normalization_methods, imputation_methods))
normalization_methods = ["vst", "zscore", "quantile"]

preprocessing_modes = list(
itertools.product(normalization_methods, imputation_methods)
)

results_list = []

@@ -212,7 +231,9 @@ def _compare_preprocessing_modes(self, func, params_for_func) -> list:
for preprocessing_mode in preprocessing_modes:
# reset preprocessing
dataset.reset_preprocessing()
print(f"Normalization {preprocessing_mode[0]}, Imputation {str(preprocessing_mode[1])}")
print(
f"Normalization {preprocessing_mode[0]}, Imputation {str(preprocessing_mode[1])}"
)
dataset.mat.replace([np.inf, -np.inf], np.nan, inplace=True)

dataset.preprocess(
@@ -223,7 +244,7 @@ def _compare_preprocessing_modes(self, func, params_for_func) -> list:

res = func(**params_for_func)
results_list.append(res)

print("\t")

return results_list
@@ -232,29 +253,31 @@ def _log2_transform(self):
self.mat = np.log2(self.mat + 0.1)
self.preprocessing_info.update({"Log2-transformed": True})
print("Data has been log2-transformed.")
def batch_correction(self, batch:str):

def batch_correction(self, batch: str):
"""Correct for technical bias/batch effects
Behdenna A, Haziza J, Azencot CA and Nordor A. (2020) pyComBat, a Python tool for batch effects correction in high-throughput molecular data using empirical Bayes methods. bioRxiv doi: 10.1101/2020.03.17.995431
Args:
batch (str): column name in the metadata describing the different batches
"""
import combat
from combat.pycombat import pycombat

data = self.mat.transpose()
series_of_batches = self.metadata.set_index(self.sample).reindex(data.columns.to_list())[batch]
series_of_batches = self.metadata.set_index(self.sample).reindex(
data.columns.to_list()
)[batch]
self.mat = pycombat(data=data, batch=series_of_batches).transpose()

@ignore_warning(RuntimeWarning)
def preprocess(
self,
log2_transform: bool=True,
remove_contaminations: bool=False,
subset: bool=False,
data_completeness: float=0,
normalization: str=None,
imputation: str=None,
remove_samples: list=None,
log2_transform: bool = True,
remove_contaminations: bool = False,
subset: bool = False,
data_completeness: float = 0,
normalization: str = None,
imputation: str = None,
remove_samples: list = None,
):
"""Preprocess Protein data
@@ -300,15 +323,14 @@ def preprocess(
"""
if remove_contaminations:
self._filter()

if remove_samples is not None:
self._remove_sampels(sample_list=remove_samples)

if subset:
self.mat = self._subset()


if data_completeness> 0:
if data_completeness > 0:
self._remove_na_values(cut_off=data_completeness)

if log2_transform and self.preprocessing_info.get("Log2-transformed") is False:
@@ -317,9 +339,14 @@ def preprocess(
if normalization is not None:
self._normalization(method=normalization)
self.mat = self.mat.replace([np.inf, -np.inf], np.nan)

if imputation is not None:
self._imputation(method=imputation)

self.mat = self.mat.loc[:, (self.mat != 0).any(axis=0)]
self.preprocessing_info.update(
{
"Matrix: Number of ProteinIDs/ProteinGroups": self.mat.shape[1],
}
)
self.preprocessed = True
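For reference, a typical call exercising the changed code paths might look as follows; dataset is assumed to be an already constructed DataSet instance and the argument values are illustrative only:

# Sketch only: completeness cutoff plus normalization and imputation.
dataset.preprocess(
    remove_contaminations=True,
    data_completeness=0.7,       # drop proteins quantified in < 70% of samples
    log2_transform=True,
    normalization="zscore",
    imputation="knn",
)
dataset.preprocess_print_info()  # prints the updated preprocessing_info entries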
