Merge pull request #63 from bigbio/dev
Minor changes in the README
ypriverol authored Jun 2, 2024
2 parents 4d639ad + 75b3382 commit 20f2a59
Showing 3 changed files with 15 additions and 16 deletions.
3 changes: 1 addition & 2 deletions README.md
@@ -193,8 +193,6 @@ Options:

#### Compute TPA

-# TODO @PingZheng: Can you confirm that this command works?
-
```asciidoc
python compute_tpa --fasta Homo-sapiens-uniprot-reviewed-contaminants-decoy-202210.fasta --organism 'human' --peptides PXD003947-peptides.csv --ruler --ploidy 2 --cpc 200 --output PXD003947-tpa.tsv --verbose
```
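For reviewers unfamiliar with the flags: `--ruler` switches on a proteomic-ruler style copy-number estimate driven by `--ploidy` and `--cpc`, while the base output follows the Total Protein Approach. A minimal sketch of the TPA part only (hypothetical column names; not ibaqpy's actual code):

```python
import pandas as pd

def tpa(df: pd.DataFrame) -> pd.Series:
    """Total Protein Approach: abundance in mol per gram of total protein.

    Assumes 'intensity' holds each protein's summed MS signal and
    'mw_da' its molecular weight in Daltons (both hypothetical names).
    """
    # Each protein's share of the total signal, scaled by molecular mass
    return df["intensity"] / (df["intensity"].sum() * df["mw_da"])
```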
@@ -250,3 +248,4 @@ Wang H, Dai C, Pfeuffer J, Sachsenberg T, Sanchez A, Bai M, Perez-Riverol Y. Tis
- Julianus Pfeuffer
- Yasset Perez-Riverol
- Hong Wang
+- Ping Zheng
14 changes: 7 additions & 7 deletions ibaqpy/ibaq/combiner.py
@@ -29,7 +29,7 @@

class Combiner:
    def __init__(
-            self, data_folder: os.PathLike, covariate: str = None, organism: str = "HUMAN"
+        self, data_folder: os.PathLike, covariate: str = None, organism: str = "HUMAN"
    ):
        """Generate concatenated IbaqNorm and metadata."""
        self.df_pca = compute_pca(self.df_corrected.T, n_components=5)
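`compute_pca` reduces the corrected matrix (transposed so samples are rows) to a handful of components. A sketch of what such a helper typically looks like with scikit-learn (the in-repo version may differ):

```python
import pandas as pd
from sklearn.decomposition import PCA

def compute_pca(df: pd.DataFrame, n_components: int = 5) -> pd.DataFrame:
    """Project samples (rows) onto the first n_components principal components."""
    pca = PCA(n_components=n_components)
    scores = pca.fit_transform(df.values)
    columns = [f"PC{i + 1}" for i in range(scores.shape[1])]
    return pd.DataFrame(scores, index=df.index, columns=columns)
```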
@@ -114,11 +114,11 @@ def imputer(self, covariate_to_keep: list = None):
        print(self.df.head())

    def outlier_removal(
-            self,
-            n_components: int = None,
-            min_cluster_size: int = None,
-            min_samples_num: int = None,
-            n_iter: int = None,
+        self,
+        n_components: int = None,
+        min_cluster_size: int = None,
+        min_samples_num: int = None,
+        n_iter: int = None,
    ):
        logger.info("Removing outliers from imputed data ...")
        # Apply iterative outlier removal on imputed data
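The parameter names suggest PCA followed by density-based clustering, repeated up to `n_iter` times. A rough sketch of that pattern, assuming the `hdbscan` package (an illustration of the idea, not the actual function body):

```python
import pandas as pd
from hdbscan import HDBSCAN
from sklearn.decomposition import PCA

def iterative_outlier_removal(
    df: pd.DataFrame,
    n_components: int = 5,
    min_cluster_size: int = 10,
    min_samples_num: int = 5,
    n_iter: int = 5,
) -> pd.DataFrame:
    """Iteratively drop samples that HDBSCAN flags as noise in PCA space."""
    for _ in range(n_iter):
        scores = PCA(n_components=n_components).fit_transform(df.values)
        labels = HDBSCAN(
            min_cluster_size=min_cluster_size, min_samples=min_samples_num
        ).fit_predict(scores)
        if (labels != -1).all():  # -1 is HDBSCAN's noise label
            break  # converged: no outliers flagged this round
        df = df.loc[labels != -1]
    return df
```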
@@ -164,7 +164,7 @@ def outlier_removal(
        )

    def batch_correction(
-            self, n_components: int = None, tissue_parts_to_keep: int = None
+        self, n_components: int = None, tissue_parts_to_keep: int = None
    ):
        logger.info("Applying batch effect correction ...")
        # Plot PCA of uncorrected imputed data
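`batch_correction` adjusts intensities for batch effects. As a simplified illustration of the general idea only (proper methods such as ComBat also model variance and covariates), per-feature mean-centering within each batch looks like this:

```python
import pandas as pd

def center_batches(df: pd.DataFrame, batches: pd.Series) -> pd.DataFrame:
    """Remove each batch's per-feature offset, keeping the global mean.

    df is samples x features; batches gives a batch label per sample.
    """
    global_mean = df.mean(axis=0)
    corrected = df.copy()
    for batch in batches.unique():
        rows = (batches == batch).values
        # Subtract the batch mean, add back the global mean to keep scale
        corrected.loc[rows] = df.loc[rows] - df.loc[rows].mean(axis=0) + global_mean
    return corrected
```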
14 changes: 7 additions & 7 deletions ibaqpy/ibaq/peptide_normalization.py
@@ -540,18 +540,18 @@ def peptide_normalization(
        raise FileNotFoundError("The file does not exist.")

    print("Loading data..")
-    F = Feature(parquet)
+    feature = Feature(parquet)
    if sdrf:
        technical_repetitions, label, sample_names, choice = analyse_sdrf(sdrf)
    else:
-        technical_repetitions, label, sample_names, choice = F.experimental_inference
-    low_frequency_peptides = F.low_frequency_peptides
+        technical_repetitions, label, sample_names, choice = feature.experimental_inference
+    low_frequency_peptides = feature.low_frequency_peptides
    header = False
    if not skip_normalization and pnmethod == "globalMedian":
-        med_map = F.get_median_map()
+        med_map = feature.get_median_map()
    elif not skip_normalization and pnmethod == "conditionMedian":
-        med_map = F.get_median_map_to_condition()
-    for samples, df in F.iter_samples():
+        med_map = feature.get_median_map_to_condition()
+    for samples, df in feature.iter_samples():
        for sample in samples:
            # Perform data preprocessing on every sample
            print(f"{str(sample).upper()}: Data preprocessing...")
@@ -638,4 +638,4 @@ def peptide_normalization(
            header = True

    if save_parquet:
-        F.csv2parquet(output)
+        feature.csv2parquet(output)
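`csv2parquet` converts the CSV output into a Parquet copy. A minimal equivalent with pandas (pyarrow or fastparquet required; the real method may differ, e.g. by streaming in chunks):

```python
import pandas as pd

def csv2parquet(csv_path: str) -> None:
    """Write a Parquet copy of the CSV next to the original file."""
    df = pd.read_csv(csv_path)
    df.to_parquet(csv_path.rsplit(".", 1)[0] + ".parquet", index=False)
```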
