Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: apply flake8-pytest-style linter rules #8307

Merged
merged 5 commits (source and target branch names not captured) on
Sep 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions api/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ select = [
"FURB", # refurb rules
"I", # isort rules
"N", # pep8-naming
"PT", # flake8-pytest-style rules
"RUF019", # unnecessary-key-check
"RUF100", # unused-noqa
"RUF101", # redirected-noqa
Expand Down Expand Up @@ -50,6 +51,7 @@ ignore = [
"B905", # zip-without-explicit-strict
"N806", # non-lowercase-variable-in-function
"N815", # mixed-case-variable-in-class-scope
"PT011", # pytest-raises-too-broad
"SIM102", # collapsible-if
"SIM103", # needless-bool
"SIM105", # suppressible-exception
Expand Down
12 changes: 6 additions & 6 deletions api/tests/integration_tests/model_runtime/xinference/test_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock


@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference_mock):
model = XinferenceAILargeLanguageModel()

Expand All @@ -45,7 +45,7 @@ def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference
)


@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock):
model = XinferenceAILargeLanguageModel()

Expand Down Expand Up @@ -75,7 +75,7 @@ def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock):
assert response.usage.total_tokens > 0


@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock):
model = XinferenceAILargeLanguageModel()

Expand Down Expand Up @@ -236,7 +236,7 @@ def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock):
# assert response.message.tool_calls[0].function.name == 'get_current_weather'


@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinference_mock):
model = XinferenceAILargeLanguageModel()

Expand All @@ -261,7 +261,7 @@ def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinf
)


@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock):
model = XinferenceAILargeLanguageModel()

Expand All @@ -286,7 +286,7 @@ def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock):
assert response.usage.total_tokens > 0


@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
def test_invoke_stream_generation_model(setup_openai_mock, setup_xinference_mock):
model = XinferenceAILargeLanguageModel()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,17 @@ def test_loading_subclass_from_source():
module = load_single_subclass_from_source(
module_name="ChildClass", script_path=os.path.join(current_path, "child_class.py"), parent_type=ParentClass
)
assert module and module.__name__ == "ChildClass"
assert module
assert module.__name__ == "ChildClass"


def test_load_import_module_from_source():
current_path = os.getcwd()
module = import_module_from_source(
module_name="ChildClass", py_file_path=os.path.join(current_path, "child_class.py")
)
assert module and module.__name__ == "ChildClass"
assert module
assert module.__name__ == "ChildClass"


def test_lazy_loading_subclass_from_source():
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def setup_method(self):
self.vector._client = MagicMock()

@pytest.mark.parametrize(
"search_response, expected_length, expected_doc_id",
("search_response", "expected_length", "expected_doc_id"),
[
(
{
Expand Down
2 changes: 1 addition & 1 deletion api/tests/unit_tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
CACHED_APP.config.update({"TESTING": True})


@pytest.fixture()
@pytest.fixture
def app() -> Flask:
return CACHED_APP

Expand Down
8 changes: 4 additions & 4 deletions api/tests/unit_tests/core/helper/test_ssrf_proxy.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import random
from unittest.mock import MagicMock, patch

import pytest

from core.helper.ssrf_proxy import SSRF_DEFAULT_MAX_RETRIES, STATUS_FORCELIST, make_request


Expand All @@ -22,11 +24,9 @@ def test_retry_exceed_max_retries(mock_request):
side_effects = [mock_response] * SSRF_DEFAULT_MAX_RETRIES
mock_request.side_effect = side_effects

try:
with pytest.raises(Exception) as e:
make_request("GET", "http://example.com", max_retries=SSRF_DEFAULT_MAX_RETRIES - 1)
raise AssertionError("Expected Exception not raised")
except Exception as e:
assert str(e) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com"
assert str(e.value) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com"


@patch("httpx.request")
Expand Down
14 changes: 5 additions & 9 deletions api/tests/unit_tests/libs/test_email.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import pytest

from libs.helper import email


Expand All @@ -9,17 +11,11 @@ def test_email_with_valid_email():


def test_email_with_invalid_email():
try:
with pytest.raises(ValueError, match="invalid_email is not a valid email."):
email("invalid_email")
except ValueError as e:
assert str(e) == "invalid_email is not a valid email."

try:
with pytest.raises(ValueError, match="@example.com is not a valid email."):
email("@example.com")
except ValueError as e:
assert str(e) == "@example.com is not a valid email."

try:
with pytest.raises(ValueError, match="()@example.com is not a valid email."):
email("()@example.com")
except ValueError as e:
assert str(e) == "()@example.com is not a valid email."