Merge pull request #348 from MannLabs/liniting_rule_B
liniting_rule_B
mschwoer authored Oct 18, 2024
2 parents 4fc449a + 843953a commit 5b71c93
Showing 15 changed files with 84 additions and 65 deletions.
12 changes: 11 additions & 1 deletion .secrets.baseline
@@ -139,6 +139,16 @@
}
],
"results": {
".github/workflows/create_release.yml": [
{
"type": "Secret Keyword",
"filename": ".github/workflows/create_release.yml",
"hashed_secret": "3e26d6750975d678acb8fa35a0f69237881576b0",
"is_verified": false,
"line_number": 15,
"is_secret": false
}
],
"docs/workflow_mq.html": [
{
"type": "Base64 High Entropy String",
@@ -150,5 +160,5 @@
}
]
},
"generated_at": "2024-09-18T09:54:14Z"
"generated_at": "2024-10-08T15:30:41Z"
}
7 changes: 7 additions & 0 deletions README.md
@@ -148,6 +148,13 @@ You can run the checks yourself using:
pre-commit run --all-files
```

##### The `detect-secrets` hook fails
This happens when newly added code is flagged as a potential secret.
1. Run `detect-secrets scan --exclude-files testfiles --exclude-lines '"(hash|id|image/\w+)":.*' > .secrets.baseline`
(check `.pre-commit-config.yaml` for the exact parameters)
2. Run `detect-secrets audit .secrets.baseline` and check if the detected 'secret' is actually a secret
3. Commit the latest version of `.secrets.baseline`



---
4 changes: 3 additions & 1 deletion alphastats/DataSet.py
@@ -328,7 +328,7 @@ def plot_volcano(
perm: int = 100,
fdr: float = 0.05,
# compare_preprocessing_modes: bool = False, # TODO reimplement
color_list: list = [],
color_list: list = None,
):
"""Plot Volcano Plot
@@ -360,6 +360,8 @@
# return results
#
# else:
if color_list is None:
color_list = []
volcano_plot = VolcanoPlot(
mat=self.mat,
rawinput=self.rawinput,
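The `color_list` change above is the standard remedy for flake8-bugbear's B006 rule (mutable argument defaults), one of the `B` rules enabled in this commit: a default list is created once at function definition time and shared across calls, so it is replaced by a `None` sentinel and resolved inside the function. A minimal sketch of the pattern, not code from this repository:

```python
def collect(item, bucket: list = None):
    """None-sentinel pattern for mutable defaults (flake8-bugbear B006)."""
    if bucket is None:
        bucket = []  # a fresh list per call, never shared between calls
    bucket.append(item)
    return bucket

print(collect("a"))  # ['a']
print(collect("b"))  # ['b'] - with a shared `[]` default this would be ['a', 'b']
```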
4 changes: 1 addition & 3 deletions alphastats/gui/AlphaPeptStats.py
@@ -29,9 +29,7 @@
_this_directory = os.path.dirname(_this_file)
icon = os.path.join(_this_directory, "alphapeptstats_logo.png")

header_html = img_center + "<img src='data:image/png;base64,{}'>".format(
img_to_bytes(icon)
)
header_html = img_center + f"<img src='data:image/png;base64,{img_to_bytes(icon)}'>"

st.markdown(
header_html,
2 changes: 1 addition & 1 deletion alphastats/gui/utils/ollama_utils.py
@@ -356,7 +356,7 @@ def display_chat_history(self):
-------
None
"""
for i, message in enumerate(self.messages):
for message in self.messages:
role = message["role"].capitalize()
content = message["content"]

2 changes: 1 addition & 1 deletion alphastats/gui/utils/openai_utils.py
@@ -155,7 +155,7 @@ def send_message_save_thread(
plots = None
messages = client.beta.threads.messages.list(thread_id=thread_id)
st.session_state[storing_variable] = []
for num, message in enumerate(messages.data[::-1]):
for message in messages.data[::-1]:
role = message.role
if message.content:
content = message.content[0].text.value
12 changes: 7 additions & 5 deletions alphastats/loader/MaxQuantLoader.py
@@ -15,11 +15,7 @@ def __init__(
intensity_column: Union[str, list] = "LFQ intensity [sample]",
index_column: str = "Protein IDs",
gene_names_column: str = "Gene names",
filter_columns: list = [
"Only identified by site",
"Reverse",
"Potential contaminant",
],
filter_columns: list = None,
confidence_column: str = "Q-value",
evidence_file=None,
sep: str = "\t",
@@ -36,6 +32,12 @@
sep (str, optional): separator of the input file. Defaults to "\t".
"""

if filter_columns is None:
filter_columns = [
"Only identified by site",
"Reverse",
"Potential contaminant",
]
super().__init__(file, intensity_column, index_column, sep)
self.filter_columns = filter_columns + self.filter_columns
self.confidence_column = confidence_column
42 changes: 26 additions & 16 deletions alphastats/multicova/multicova.py
@@ -105,19 +105,21 @@ def workflow_permutation_tvals(df, c1, c2, s0=1, n_perm=2, parallelize=False):
res_perm = list()
for i in np.arange(0, len(all_c_rand)):
if parallelize:
res_i = df.swifter.progress_bar(False).apply(
res_i = df.swifter.progress_bar(
False
).apply(
lambda row: perform_ttest(
row[all_c_rand[i][0 : len(c1)]],
row[all_c_rand[i][len(c1) : len(c1) + len(c2)]],
row[all_c_rand[i][0 : len(c1)]], # noqa: B023
row[all_c_rand[i][len(c1) : len(c1) + len(c2)]], # noqa: B023
s0=s0,
),
axis=1,
)
else:
res_i = df.apply(
lambda row: perform_ttest(
row[all_c_rand[i][0 : len(c1)]],
row[all_c_rand[i][len(c1) : len(c1) + len(c2)]],
row[all_c_rand[i][0 : len(c1)]], # noqa: B023
row[all_c_rand[i][len(c1) : len(c1) + len(c2)]], # noqa: B023
s0=s0,
),
axis=1,
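The `# noqa: B023` comments added in this hunk silence flake8-bugbear's warning that the lambdas use the loop variable `i` without binding it. Because each lambda is consumed immediately by `df.apply` within the same iteration, the late-binding pitfall cannot actually occur here, so suppressing the rule is a reasonable choice. A small sketch of what B023 warns about and the usual default-argument fix (illustrative, not repository code):

```python
# B023: a function defined inside a loop captures the loop variable by
# reference, so every function sees the variable's final value.
callbacks = []
for i in range(3):
    callbacks.append(lambda: i)  # this is what B023 flags
print([f() for f in callbacks])  # [2, 2, 2] - all lambdas see i == 2

# Usual fix: bind the current value through a default argument.
bound = []
for i in range(3):
    bound.append(lambda i=i: i)
print([f() for f in bound])  # [0, 1, 2]
```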
@@ -312,13 +314,17 @@ def get_fdr_line(
n_x,
n_y,
plot=False,
fc_s=np.arange(0, 6, 0.01),
s_s=np.arange(0.005, 6, 0.005),
fc_s=None,
s_s=None,
):
"""
Function to get the fdr line for a volcano plot at the specified tval_s0
limit, s0, n_x and n_y.
"""
if fc_s is None:
fc_s = np.arange(0, 6, 0.01)
if s_s is None:
s_s = np.arange(0.005, 6, 0.005)
pvals = np.ones(len(fc_s))
svals = np.zeros(len(fc_s))
for i in np.arange(0, len(fc_s)):
@@ -490,17 +496,22 @@ def regression_workflow_permutation(y, X_rand, s0):
return res_rand


def get_fdr_line_regression(
def get_fdr_line_regression( # TODO: unused
t_limits,
s0,
X,
plot=False,
fc_s=np.arange(0, 6, 0.01),
s_s=np.arange(0.005, 6, 0.005),
fc_s=None,
s_s=None,
):
"""
Function to get the fdr line for a volcano plot at the specified tval_s0 limit, s0, n_x and n_y.
"""
if fc_s is None:
fc_s = np.arange(0, 6, 0.01)
if s_s is None:
s_s = np.arange(0.005, 6, 0.005)

# pvals = [list(np.ones(len(fc_s)))] * X.shape[1]
pvals = [list(np.ones(len(fc_s))) for i in range(0, X.shape[1])]
# print(pvals)
@@ -565,7 +576,8 @@ def perform_ttest_getMaxS_regression(fc, s, s0, X):

def generate_perms(n, n_rand, seed=42):
"""
Generate n_rand permutations of indeces ranging from 0 to n.
Generate n_rand permutations of indices ranging from 0 to n.
# TODO: replace with something from a library
"""
np.random.seed(seed)
idx_v = np.arange(0, n)
@@ -574,18 +586,16 @@ def generate_perms(n, n_rand, seed=42):
n_rand_max = math.factorial(n) - 1
if n_rand_max <= n_rand:
print(
"{} random permutations cannot be created. The maximum of n_rand={} is used instead.".format(
n_rand, n_rand_max
)
f"{n_rand} random permutations cannot be created. The maximum of n_rand={n_rand_max} is used instead."
)
n_rand = n_rand_max
while n_rand_i < n_rand:
rand_i = list(np.random.permutation(idx_v))
if np.all(rand_i == idx_v):
next
continue
else:
if rand_i in rand_v:
next
continue
else:
rand_v.append(rand_i)
n_rand_i = len(rand_v)
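The switch from `next` to `continue` in `generate_perms` is more than cosmetic: a bare `next` is just an expression that evaluates the built-in function and discards it, so it never skips an iteration. The surrounding `if`/`else` structure happened to keep the old code correct, but `continue` states the intent explicitly. A small illustration of the difference (not repository code):

```python
# A bare name such as `next` is a no-op expression, not flow control.
collected = []
for n in range(5):
    if n % 2 == 0:
        next  # evaluates the builtin `next` and does nothing
    collected.append(n)
print(collected)  # [0, 1, 2, 3, 4] - nothing was skipped

kept = []
for n in range(5):
    if n % 2 == 0:
        continue  # actually jumps to the next iteration
    kept.append(n)
print(kept)  # [1, 3]
```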
4 changes: 3 additions & 1 deletion alphastats/plots/VolcanoPlot.py
@@ -56,8 +56,10 @@ def __init__(
plot=True,
perm=100,
fdr=0.05,
color_list=[],
color_list=None,
):
if color_list is None:
color_list = []
self.mat: pd.DataFrame = mat
self.rawinput = rawinput
self.metadata: pd.DataFrame = metadata
4 changes: 2 additions & 2 deletions alphastats/utils.py
@@ -57,8 +57,8 @@ def check_internetconnection():
try:
connection.request("HEAD", "/")
return True
except Exception:
raise ConnectionError("No internet connection available.")
except Exception as e:
raise ConnectionError("No internet connection available.") from e
finally:
connection.close()

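Re-raising with `from e` is the usual fix for flake8-bugbear's B904 rule (raising inside an `except` block without chaining): the original exception is preserved as `__cause__`, so the traceback shows both the low-level failure and the higher-level error. A minimal sketch, with names chosen for illustration only:

```python
def read_settings(path: str) -> str:
    try:
        with open(path) as fh:
            return fh.read()
    except OSError as e:
        # `from e` links the original OSError as __cause__, so the full
        # exception chain appears in the traceback.
        raise RuntimeError(f"could not read settings from {path!r}") from e

try:
    read_settings("/nonexistent/settings.toml")
except RuntimeError as err:
    print(type(err.__cause__).__name__)  # FileNotFoundError
```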
18 changes: 9 additions & 9 deletions nbs/ramus_2016.ipynb

Large diffs are not rendered by default.

13 changes: 7 additions & 6 deletions pyproject.toml
@@ -7,18 +7,19 @@ extend-exclude = [".bumpversion.cfg", ".secrets.baseline"]
# "E",
# # Pyflakes
# "F",
# # pyupgrade
# "UP",
# pyupgrade
"UP",
# # flake8-bugbear
# "B",
"B",
# # flake8-simplify
"SIM",
# isort
"I",
]

# ignore = [
ignore = [
# "E501", # Line too long (ruff wraps code, but not docstrings)
# "B028", # No explicit `stacklevel` keyword argument found (for warnings)
"B028", # No explicit `stacklevel` keyword argument found (for warnings)
# "B905" # This causes problems in numba code: `zip()` without an explicit `strict=` parameter
# ]
"B019" # Use of `functools.lru_cache` or `functools.cache` on methods can lead to memory leaks TODO: revisit
]
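For context on the new `ignore` entry B028: that rule flags `warnings.warn` calls without an explicit `stacklevel`, which makes the warning point at the library's own line rather than at the caller; ignoring it leaves existing `warn` calls untouched, while B019 (caching on methods) is deferred with a TODO. A short sketch of what B028 is about (illustrative only):

```python
import warnings

def deprecated_api():
    # Without stacklevel, the warning is attributed to this line inside
    # the library - this is what B028 flags.
    warnings.warn("deprecated_api() is deprecated", DeprecationWarning)

def deprecated_api_v2():
    # With stacklevel=2, the warning is attributed to the caller's line.
    warnings.warn(
        "deprecated_api_v2() is deprecated",
        DeprecationWarning,
        stacklevel=2,
    )
```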
2 changes: 1 addition & 1 deletion setup.py
@@ -11,7 +11,7 @@


def get_long_description():
with open("README.md", "r") as readme_file:
with open("README.md") as readme_file:
long_description = readme_file.read()
return long_description

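The f-string conversion in AlphaPeptStats.py above and this removal of the redundant `"r"` mode both come from ruff's pyupgrade (`UP`) rules newly enabled in pyproject.toml. A short before/after sketch of the two rewrites (illustrative, not repository code):

```python
# Before (what the pyupgrade rules flag):
name = "world"
greeting = "Hello {}!".format(name)  # prefer an f-string
with open("README.md", "r") as fh:   # "r" is already the default mode
    text = fh.read()

# After:
greeting = f"Hello {name}!"
with open("README.md") as fh:
    text = fh.read()
```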
11 changes: 2 additions & 9 deletions tests/test_DataSet.py
@@ -31,13 +31,6 @@ class BaseTestDataSet:
# this is wrapped in a nested class so it doesn't get called separately when testing
# plus to avoid multiple inheritance
class BaseTest(unittest.TestCase):
@contextmanager
def assertNotRaises(self, exc_type):
try:
yield None
except exc_type:
raise self.failureException("{} raised".format(exc_type.__name__))

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

@@ -49,8 +42,8 @@ def __init__(self, *args, **kwargs):
self.comparison_column = None

def test_check_loader_no_error(self):
with self.assertNotRaises(ValueError):
self.obj._check_loader(loader=self.loader)
self.obj._check_loader(loader=self.loader)
# nothing raised -> ok

def test_check_loader_error_invalid_column(self):
#  invalid index column
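Dropping the custom `assertNotRaises` context manager works because in `unittest` simply calling the code under test already asserts that no exception is raised: any unexpected exception turns the test into an error. A minimal sketch of the idea, with `check_loader` standing in for the real method:

```python
import unittest

def check_loader(config: dict) -> None:
    """Stand-in for the real check; raises ValueError on bad input."""
    if "index_column" not in config:
        raise ValueError("index_column missing")

class LoaderCheckTest(unittest.TestCase):
    def test_check_loader_no_error(self):
        # Calling the code directly is enough: if it raises, unittest
        # reports the test as an error, so no wrapper is needed.
        check_loader({"index_column": "Protein IDs"})
        # nothing raised -> ok

    def test_check_loader_error(self):
        # The failure case still uses the built-in context manager.
        with self.assertRaises(ValueError):
            check_loader({})

if __name__ == "__main__":
    unittest.main()
```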
12 changes: 3 additions & 9 deletions tests/test_loaders.py
@@ -24,13 +24,6 @@ class BaseTestLoader:
# this is wrapped in a nested class so it doesn't get called separately when testing
# plus to avoid multiple inheritance
class BaseTest(unittest.TestCase):
@contextmanager
def assertNotRaises(self, exc_type):
try:
yield None
except exc_type:
raise self.failureException("{} raised".format(exc_type.__name__))

def test_dataformat(self):
# check if loaded data is pandas dataframe
self.assertIsInstance(self.obj.rawinput, pd.DataFrame)
@@ -46,8 +39,9 @@ def test_check_if_columns_are_present_error(self):
def test_check_if_columns_are_present_no_error(self):
# check if columns are present
# check if error gets raised when column is not present
with self.assertNotRaises(KeyError):
self.obj._check_if_columns_are_present()
self.obj._check_if_columns_are_present()

# nothing raised -> ok

@patch("logging.Logger.warning")
def test_check_if_indexcolumn_is_unique_warning(self, mock):
