diff --git a/.github/workflows/python-app.yml b/.github/workflows/quality.yml similarity index 60% rename from .github/workflows/python-app.yml rename to .github/workflows/quality.yml index 694f557..19ed938 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/quality.yml @@ -1,7 +1,7 @@ # This workflow will install Python dependencies, run tests and lint with a single version of Python # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python -name: Lint and Pytest +name: Code Quality on: push: @@ -19,21 +19,18 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Install poetry + run: pipx install poetry - name: Set up Python 3.11 - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: "3.11" + cache: 'poetry' # caching poetry dependencies - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 pytest pennylane - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + run: poetry install - name: Lint with flake8 - run: | + run: | # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + poetry run flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Test with pytest - run: | - pytest + poetry run flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml new file mode 100644 index 0000000..786898d --- /dev/null +++ b/.github/workflows/testing.yml @@ -0,0 +1,32 @@ +# This workflow will install Python dependencies and run tests with a single version of Python +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Testing + +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + +permissions: + contents: read + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - name: Install poetry + run: pipx install poetry + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: 'poetry' # caching poetry dependencies + - name: Install dependencies + run: poetry install + - name: Test with pytest + run: poetry run pytest diff --git a/README.md b/README.md index 29e5d9c..2d1736c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # QML Essentials -[![version](https://img.shields.io/badge/version-0.1.12-green.svg)](https://ea3a0fbb-599f-4d83-86f1-0e71abe27513.ka.bw-cloud-instance.org/lc3267/quantum/) [![Pipx Status](https://servers.stroblme.de/api/badge/3/uptime/72?color=%2331c754&labelColor=%233f4850)](https://servers.stroblme.de/status/open) [![Lint and Pytest](https://github.com/cirKITers/qml-essentials/actions/workflows/python-app.yml/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/python-app.yml) [![Page Build](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment) +[![version](https://img.shields.io/badge/version-0.1.12-green.svg)](https://ea3a0fbb-599f-4d83-86f1-0e71abe27513.ka.bw-cloud-instance.org/lc3267/quantum/) [![Pipx 
Status](https://servers.stroblme.de/api/badge/3/uptime/72?color=%2331c754&labelColor=%233f4850)](https://servers.stroblme.de/status/open) [![Code Quality](https://github.com/cirKITers/qml-essentials/actions/workflows/quality.yml/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/quality.yml) [![Testing](https://github.com/cirKITers/qml-essentials/actions/workflows/testing.yml/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/testing.yml) [![Page Build](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment) ## :scroll: About diff --git a/qml_essentials/model.py b/qml_essentials/model.py index cc751fa..3e78203 100644 --- a/qml_essentials/model.py +++ b/qml_essentials/model.py @@ -540,6 +540,9 @@ def _forward( else: result = result.mean(axis=0) + if len(result.shape) == 3 and result.shape[0] == 1: + result = result[0] + if cache: np.save(file_path, result) diff --git a/tests/test_entanglement.py b/tests/test_entanglement.py index c3209bc..f4242e6 100644 --- a/tests/test_entanglement.py +++ b/tests/test_entanglement.py @@ -1,8 +1,6 @@ from qml_essentials.model import Model from qml_essentials.entanglement import Entanglement -import pytest -import numpy as np import logging import math @@ -34,7 +32,6 @@ def test_entanglement() -> None: circuit_type=test_case["circuit_type"], data_reupload=True, initialization="random", - output_qubit=0, ) ent_cap = Entanglement.meyer_wallach( diff --git a/tests/test_expressiblity.py b/tests/test_expressiblity.py index e5512e1..b20d345 100644 --- a/tests/test_expressiblity.py +++ b/tests/test_expressiblity.py @@ -1,8 +1,6 @@ from qml_essentials.model import Model from qml_essentials.expressibility import Expressibility -import pytest -import numpy as np import logging import math diff --git a/tests/test_model.py b/tests/test_model.py index 
7642724..3414885 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -5,6 +5,9 @@ import logging import inspect import shutil +import os +import hashlib + logger = logging.getLogger(__name__) @@ -116,44 +119,47 @@ def test_cache() -> None: except Exception as e: logger.warning(e) - test_cases = [ - { - "shots": 1024, - "execution_type": "expval", - "shape": (), - }, - { - "shots": -1, - "execution_type": "density", - "shape": (4, 4), - }, - { - "shots": 1024, - "execution_type": "probs", - "shape": (2,), - }, - ] - - for test_case in test_cases: - model = Model( - n_qubits=2, - n_layers=1, - circuit_type="Circuit_19", - data_reupload=True, - initialization="random", - output_qubit=0, - shots=test_case["shots"], - ) + model = Model( + n_qubits=2, + n_layers=1, + circuit_type="Circuit_19", + ) - result = model( - model.params, - inputs=None, - noise_params=None, - cache=True, - execution_type=test_case["execution_type"], - ) + result = model( + model.params, + inputs=None, + cache=True, + ) - assert result.shape == test_case["shape"], f"Test case: {test_case} failed" + hs = hashlib.md5( + repr( + { + "n_qubits": model.n_qubits, + "n_layers": model.n_layers, + "pqc": model.pqc.__class__.__name__, + "dru": model.data_reupload, + "params": model.params, + "noise_params": model.noise_params, + "execution_type": model.execution_type, + "inputs": None, + "output_qubit": model.output_qubit, + } + ).encode("utf-8") + ).hexdigest() + + cache_folder: str = ".cache" + if not os.path.exists(cache_folder): + raise Exception("Cache folder does not exist.") + + name: str = f"pqc_{hs}.npy" + file_path: str = os.path.join(cache_folder, name) + + if os.path.isfile(file_path): + cached_result = np.load(file_path) + + assert np.array_equal( + result, cached_result + ), "Cached result and calculated result are not equal." 
def test_initialization() -> None: @@ -390,6 +396,15 @@ def test_local_and_global_meas() -> None: inputs = np.array([0.1, 0.2, 0.3]) test_cases = [ { + "inputs": None, + "execution_type": "expval", + "output_qubit": -1, + "shots": -1, + "out_shape": (2, 1), + "warning": False, + }, + { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "expval", "output_qubit": -1, "shots": -1, @@ -397,6 +412,7 @@ def test_local_and_global_meas() -> None: "warning": False, }, { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "expval", "output_qubit": 0, "shots": -1, @@ -404,6 +420,7 @@ def test_local_and_global_meas() -> None: "warning": False, }, { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "expval", "output_qubit": [0, 1], "shots": -1, @@ -411,6 +428,15 @@ def test_local_and_global_meas() -> None: "warning": False, }, { + "inputs": None, + "execution_type": "density", + "output_qubit": -1, + "shots": -1, + "out_shape": (4, 4), + "warning": False, + }, + { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "density", "output_qubit": -1, "shots": -1, @@ -418,6 +444,7 @@ def test_local_and_global_meas() -> None: "warning": False, }, { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "density", "output_qubit": 0, "shots": -1, @@ -425,6 +452,7 @@ def test_local_and_global_meas() -> None: "warning": True, }, { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "probs", "output_qubit": -1, "shots": 1024, @@ -432,6 +460,7 @@ def test_local_and_global_meas() -> None: "warning": False, }, { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "probs", "output_qubit": 0, "shots": 1024, @@ -439,6 +468,7 @@ def test_local_and_global_meas() -> None: "warning": False, }, { + "inputs": np.array([0.1, 0.2, 0.3]), "execution_type": "probs", "output_qubit": [0, 1], "shots": 1024, @@ -461,7 +491,7 @@ def test_local_and_global_meas() -> None: with pytest.warns(UserWarning): out = model( model.params, - inputs=inputs, + inputs=test_case["inputs"], 
noise_params=None, cache=False, execution_type=test_case["execution_type"], @@ -469,7 +499,7 @@ def test_local_and_global_meas() -> None: else: out = model( model.params, - inputs=inputs, + inputs=test_case["inputs"], noise_params=None, cache=False, execution_type=test_case["execution_type"], @@ -486,17 +516,19 @@ def test_parity() -> None: n_qubits=2, n_layers=1, circuit_type="Circuit_1", - output_qubit=[0, 1], + output_qubit=[0, 1], # parity ) model_b = Model( n_qubits=2, n_layers=1, circuit_type="Circuit_1", - output_qubit=-1, + output_qubit=-1, # individual ) - result_a = model_a(model_a.params, inputs=None, cache=False, force_mean=True) - result_b = model_b(model_a.params, inputs=None, cache=False) # use same params! + result_a = model_a(params=model_a.params, inputs=None, force_mean=True) + result_b = model_b( + params=model_a.params, inputs=None, force_mean=True + ) # use same params! assert not np.allclose( result_a, result_b