Skip to content

Commit

Permalink
fix(api): add runner load error handling to analyze cli (#15400)
Browse files Browse the repository at this point in the history
Closes AUTH-486

# Overview

Fixes the issues caught in #15211 

Previously, RTP validation occurred as part of a simulating runner's run
process. This run process executes the protocol inside a separate task,
and any errors raised while running the task are wrapped and presented
in the "RunResult" as an analysis error.

Now that RTP validation is happening as part of the runner load process,
we have to handle the error raised in the load process in a similar way
as well. This PR adds this missing error handling in `analyze` cli
function.

# Test Plan

- [ ] Upload a protocol with incorrect RTP definition and verify that
the in-app analysis fails with the correct error and no traceback in the
error pop-up window.
- [ ] Verify that the static data of the above protocol — metadata,
requirements, and files — is populated in the failed analysis
- [ ] Verify that any other, non-RTP-related errors raised during the
runner load process are handled just like they would have in the
previous stable software.

# Risk assessment

Medium. Updates a critical part of in-app analysis, but is a fix and is
isolated to error handling of `runner.load` only.
  • Loading branch information
sanni-t authored Jun 13, 2024
1 parent 87a4756 commit bb1bc23
Show file tree
Hide file tree
Showing 2 changed files with 223 additions and 3 deletions.
85 changes: 82 additions & 3 deletions api/src/opentrons/cli/analyze.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
import logging
import sys

from opentrons.protocol_engine.types import RunTimeParameter
from opentrons.protocol_engine.types import RunTimeParameter, EngineStatus
from opentrons.protocols.api_support.types import APIVersion
from opentrons.protocol_reader import (
ProtocolReader,
Expand All @@ -33,18 +33,32 @@
ProtocolFilesInvalidError,
ProtocolSource,
)
from opentrons.protocol_runner import create_simulating_runner, RunResult
from opentrons.protocol_runner import (
create_simulating_runner,
RunResult,
PythonAndLegacyRunner,
JsonRunner,
)
from opentrons.protocol_engine import (
Command,
ErrorOccurrence,
LoadedLabware,
LoadedPipette,
LoadedModule,
Liquid,
StateSummary,
)

from opentrons_shared_data.robot.dev_types import RobotType

from opentrons_shared_data.errors import ErrorCodes
from opentrons_shared_data.errors.exceptions import (
EnumeratedError,
PythonException,
)

from opentrons.protocols.parse import PythonParseMode

OutputKind = Literal["json", "human-json"]


Expand Down Expand Up @@ -197,12 +211,77 @@ def _get_return_code(analysis: RunResult) -> int:
return 0


class UnexpectedAnalysisError(EnumeratedError):
    """An error raised while setting up the runner for analysis."""

    def __init__(
        self,
        message: Optional[str] = None,
        wrapping: Optional[Sequence[Union[EnumeratedError, Exception]]] = None,
    ) -> None:
        """Build an UnexpectedAnalysisError exception.

        Args:
            message: A human-readable description of what went wrong.
            wrapping: The underlying exceptions that caused this error.
                Plain exceptions are wrapped in ``PythonException`` so that
                every wrapped error is an ``EnumeratedError``.
        """

        def _convert_exc() -> Iterator[EnumeratedError]:
            # Normalize each wrapped exception to an EnumeratedError so the
            # error can be serialized like any other enumerated error.
            if not wrapping:
                return
            for exc in wrapping:
                if isinstance(exc, EnumeratedError):
                    yield exc
                else:
                    yield PythonException(exc)

        super().__init__(
            code=ErrorCodes.GENERAL_ERROR,
            message=message,
            # list(...) instead of the redundant [e for e in ...] comprehension.
            wrapping=list(_convert_exc()),
        )


async def _do_analyze(protocol_source: ProtocolSource) -> RunResult:
    """Run the protocol through a simulating runner and return the result.

    Errors raised while loading the protocol into the runner (e.g. run-time
    parameter validation failures) are captured and returned as an analysis
    error inside the RunResult, rather than propagating as a raw traceback.

    NOTE(review): the diffed span contained a leftover pre-change
    `return await runner.run(deck_configuration=[], protocol_source=...)`
    before the try/except, which made the new load-error handling
    unreachable; it is removed so the function has a single final run call.
    """
    runner = await create_simulating_runner(
        robot_type=protocol_source.robot_type, protocol_config=protocol_source.config
    )
    try:
        # Loading is where RTP validation now happens, so load errors must be
        # converted into analysis errors instead of crashing the CLI.
        if isinstance(runner, PythonAndLegacyRunner):
            await runner.load(
                protocol_source=protocol_source,
                python_parse_mode=PythonParseMode.NORMAL,
                run_time_param_values=None,
            )
        elif isinstance(runner, JsonRunner):
            await runner.load(protocol_source=protocol_source)
    except Exception as error:
        err_id = "analysis-setup-error"
        err_created_at = datetime.now(tz=timezone.utc)
        if isinstance(error, EnumeratedError):
            error_occ = ErrorOccurrence.from_failed(
                id=err_id, createdAt=err_created_at, error=error
            )
        else:
            # Wrap arbitrary exceptions so they serialize like other analysis
            # errors instead of surfacing an unhandled traceback to the user.
            enumerated_wrapper = UnexpectedAnalysisError(
                message=str(error),
                wrapping=[error],
            )
            error_occ = ErrorOccurrence.from_failed(
                id=err_id, createdAt=err_created_at, error=enumerated_wrapper
            )
        # Return an "empty" analysis carrying only the setup error, so the
        # caller can still report the protocol's static data.
        analysis = RunResult(
            commands=[],
            state_summary=StateSummary(
                errors=[error_occ],
                status=EngineStatus.IDLE,
                labware=[],
                pipettes=[],
                modules=[],
                labwareOffsets=[],
                liquids=[],
            ),
            parameters=[],
        )
        return analysis
    return await runner.run(deck_configuration=[])


async def _analyze(
Expand Down
141 changes: 141 additions & 0 deletions api/tests/opentrons/cli/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -260,3 +260,144 @@ def test_python_error_line_numbers(
assert result.json_output is not None
[error] = result.json_output["errors"]
assert error["detail"] == expected_detail


@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
def test_run_time_parameter_error(
    tmp_path: Path,
    output: str,
) -> None:
    """Test that an RTP validation error is shown correctly in analysis result.
    Also verify that analysis result contains all static data about the protocol.
    """
    protocol_text = textwrap.dedent(
        # Raises an exception during runner load.
        """\
requirements = {"robotType": "OT-2", "apiLevel": "2.18"} # line 1
# line 2
def add_parameters(parameters): # line 3
# No default value specified # line 4
parameters.add_bool( # line 5
display_name="Dry Run",
variable_name="dry_run",
)
def run(protocol):
pass
"""
    )
    protocol_file = tmp_path / "protocol.py"
    protocol_file.write_text(protocol_text, encoding="utf-8")
    result = _get_analysis_result([protocol_file], output)

    assert result.exit_code == 0

    analysis = result.json_output
    assert analysis is not None
    # The failed analysis must still carry all of the protocol's static data.
    expected_static_data = {
        "robotType": "OT-2 Standard",
        "pipettes": [],
        "commands": [],
        "labware": [],
        "liquids": [],
        "modules": [],
        "config": {"apiVersion": [2, 18], "protocolType": "python"},
        "files": [{"name": "protocol.py", "role": "main"}],
    }
    for key, expected_value in expected_static_data.items():
        assert analysis[key] == expected_value
    [error] = analysis["errors"]
    assert error["detail"] == (
        "TypeError [line 5]: ParameterContext.add_bool() missing 1"
        " required positional argument: 'default'"
    )


@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
def test_unexpected_error(
    tmp_path: Path,
    output: str,
) -> None:
    """Test that an unexpected error raised from outside opentrons functions is handled correctly."""
    protocol_text = textwrap.dedent(
        # Raises an exception before runner load.
        """\
requirements = {"robotType": "OT-2", "apiLevel": "2.18"} # line 1
x + 1 = 0 # line 2
def add_parameters(parameters):
parameters.add_bool()
def run(protocol):
pass
"""
    )
    protocol_file = tmp_path / "protocol.py"
    protocol_file.write_text(protocol_text, encoding="utf-8")
    result = _get_analysis_result([protocol_file], output)

    # A pre-load (parse-time) failure exits non-zero with the error on stderr.
    assert result.exit_code != 0
    expected_message = (
        "Error: cannot assign to expression here."
        " Maybe you meant '==' instead of '='? (protocol.py, line 2)\n"
    )
    assert result.stdout_stderr == expected_message


@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
def test_unexpected_runner_load_error(
    tmp_path: Path,
    output: str,
) -> None:
    """Test that an error raised during runner load is handled properly.
    Also verify that analysis result contains all static data about the protocol.
    """
    protocol_text = textwrap.dedent(
        # Raises an exception during runner load.
        """\
requirements = {"apiLevel": "2.18"} # line 1
call_a_non_existent_func() # line 2
def add_parameters(parameters): # line 4
parameters.add_bool()
def run(protocol):
pass
"""
    )
    protocol_file = tmp_path / "protocol.py"
    protocol_file.write_text(protocol_text, encoding="utf-8")
    result = _get_analysis_result([protocol_file], output)

    assert result.exit_code == 0

    analysis = result.json_output
    assert analysis is not None
    # Static protocol data must survive the failed load.
    expected_static_data = {
        "robotType": "OT-2 Standard",
        "pipettes": [],
        "commands": [],
        "config": {"apiVersion": [2, 18], "protocolType": "python"},
        "files": [{"name": "protocol.py", "role": "main"}],
    }
    for key, expected_value in expected_static_data.items():
        assert analysis[key] == expected_value
    [error] = analysis["errors"]
    assert error["detail"] == "name 'call_a_non_existent_func' is not defined"
    assert error["errorCode"] == "4000"
    assert error["errorType"] == "UnexpectedAnalysisError"


@pytest.mark.parametrize("output", ["--json-output", "--human-json-output"])
def test_analyze_json_protocol(
    tmp_path: Path,
    output: str,
) -> None:
    """Test that a json protocol analyzes correctly."""
    # Use the shared-data schema-v8 fixture checked into the repo.
    fixture_path = Path(__file__).parents[4].joinpath(
        "shared-data", "protocol", "fixtures", "8", "simpleV8.json"
    )
    result = _get_analysis_result([fixture_path], output)

    assert result.exit_code == 0
    analysis = result.json_output
    assert analysis is not None
    assert len(analysis["commands"]) == 27

0 comments on commit bb1bc23

Please sign in to comment.