Skip to content

Commit

Permalink
Merge branch 'tests' into ci
Browse files Browse the repository at this point in the history
  • Loading branch information
Melvin Strobl committed Sep 23, 2024
2 parents 408ac0d + 81854c4 commit 7ca2ce9
Show file tree
Hide file tree
Showing 7 changed files with 119 additions and 60 deletions.
21 changes: 9 additions & 12 deletions .github/workflows/python-app.yml → .github/workflows/quality.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

name: Lint and Pytest
name: Code Quality

on:
push:
Expand All @@ -19,21 +19,18 @@ jobs:

steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry
- name: Set up Python 3.11
uses: actions/setup-python@v3
uses: actions/setup-python@v5
with:
python-version: "3.11"
cache: 'poetry' # caching poetry dependencies
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest pennylane
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
run: poetry install
- name: Lint with flake8
run: |
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
poetry run flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest
poetry run flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
32 changes: 32 additions & 0 deletions .github/workflows/testing.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

name: Testing

on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]

permissions:
contents: read

jobs:
build:

runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: "3.11"
cache: 'poetry' # caching poetry dependencies
- name: Install dependencies
run: poetry install
- name: Test with pytest
run: poetry run pytest
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# QML Essentials

[![version](https://img.shields.io/badge/version-0.1.12-green.svg)](https://ea3a0fbb-599f-4d83-86f1-0e71abe27513.ka.bw-cloud-instance.org/lc3267/quantum/) [![Pipx Status](https://servers.stroblme.de/api/badge/3/uptime/72?color=%2331c754&labelColor=%233f4850)](https://servers.stroblme.de/status/open) [![Lint and Pytest](https://github.com/cirKITers/qml-essentials/actions/workflows/python-app.yml/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/python-app.yml) [![Page Build](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment)
[![version](https://img.shields.io/badge/version-0.1.12-green.svg)](https://ea3a0fbb-599f-4d83-86f1-0e71abe27513.ka.bw-cloud-instance.org/lc3267/quantum/) [![Pipx Status](https://servers.stroblme.de/api/badge/3/uptime/72?color=%2331c754&labelColor=%233f4850)](https://servers.stroblme.de/status/open) [![Code Quality](https://github.com/cirKITers/qml-essentials/actions/workflows/quality.yml/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/quality.yml) [![Testing](https://github.com/cirKITers/qml-essentials/actions/workflows/testing.yml/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/testing.yml) [![Page Build](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/cirKITers/qml-essentials/actions/workflows/pages/pages-build-deployment)

## :scroll: About

Expand Down
3 changes: 3 additions & 0 deletions qml_essentials/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -540,6 +540,9 @@ def _forward(
else:
result = result.mean(axis=0)

if len(result.shape) == 3 and result.shape[0] == 1:
result = result[0]

if cache:
np.save(file_path, result)

Expand Down
3 changes: 0 additions & 3 deletions tests/test_entanglement.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
from qml_essentials.model import Model
from qml_essentials.entanglement import Entanglement

import pytest
import numpy as np
import logging
import math

Expand Down Expand Up @@ -34,7 +32,6 @@ def test_entanglement() -> None:
circuit_type=test_case["circuit_type"],
data_reupload=True,
initialization="random",
output_qubit=0,
)

ent_cap = Entanglement.meyer_wallach(
Expand Down
2 changes: 0 additions & 2 deletions tests/test_expressiblity.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
from qml_essentials.model import Model
from qml_essentials.expressibility import Expressibility

import pytest
import numpy as np
import logging
import math

Expand Down
116 changes: 74 additions & 42 deletions tests/test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@
import logging
import inspect
import shutil
import os
import hashlib


logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -116,44 +119,47 @@ def test_cache() -> None:
except Exception as e:
logger.warning(e)

test_cases = [
{
"shots": 1024,
"execution_type": "expval",
"shape": (),
},
{
"shots": -1,
"execution_type": "density",
"shape": (4, 4),
},
{
"shots": 1024,
"execution_type": "probs",
"shape": (2,),
},
]

for test_case in test_cases:
model = Model(
n_qubits=2,
n_layers=1,
circuit_type="Circuit_19",
data_reupload=True,
initialization="random",
output_qubit=0,
shots=test_case["shots"],
)
model = Model(
n_qubits=2,
n_layers=1,
circuit_type="Circuit_19",
)

result = model(
model.params,
inputs=None,
noise_params=None,
cache=True,
execution_type=test_case["execution_type"],
)
result = model(
model.params,
inputs=None,
cache=True,
)

assert result.shape == test_case["shape"], f"Test case: {test_case} failed"
hs = hashlib.md5(
repr(
{
"n_qubits": model.n_qubits,
"n_layers": model.n_layers,
"pqc": model.pqc.__class__.__name__,
"dru": model.data_reupload,
"params": model.params,
"noise_params": model.noise_params,
"execution_type": model.execution_type,
"inputs": None,
"output_qubit": model.output_qubit,
}
).encode("utf-8")
).hexdigest()

cache_folder: str = ".cache"
if not os.path.exists(cache_folder):
raise Exception("Cache folder does not exist.")

name: str = f"pqc_{hs}.npy"
file_path: str = os.path.join(cache_folder, name)

if os.path.isfile(file_path):
cached_result = np.load(file_path)

assert np.array_equal(
result, cached_result
), "Cached result and calculated result are not equal."


def test_initialization() -> None:
Expand Down Expand Up @@ -390,55 +396,79 @@ def test_local_and_global_meas() -> None:
inputs = np.array([0.1, 0.2, 0.3])
test_cases = [
{
"inputs": None,
"execution_type": "expval",
"output_qubit": -1,
"shots": -1,
"out_shape": (2, 1),
"warning": False,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "expval",
"output_qubit": -1,
"shots": -1,
"out_shape": (2, 3),
"warning": False,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "expval",
"output_qubit": 0,
"shots": -1,
"out_shape": (3,),
"warning": False,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "expval",
"output_qubit": [0, 1],
"shots": -1,
"out_shape": (3,),
"warning": False,
},
{
"inputs": None,
"execution_type": "density",
"output_qubit": -1,
"shots": -1,
"out_shape": (4, 4),
"warning": False,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "density",
"output_qubit": -1,
"shots": -1,
"out_shape": (3, 4, 4),
"warning": False,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "density",
"output_qubit": 0,
"shots": -1,
"out_shape": (3, 4, 4),
"warning": True,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "probs",
"output_qubit": -1,
"shots": 1024,
"out_shape": (3, 4),
"warning": False,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "probs",
"output_qubit": 0,
"shots": 1024,
"out_shape": (3, 2),
"warning": False,
},
{
"inputs": np.array([0.1, 0.2, 0.3]),
"execution_type": "probs",
"output_qubit": [0, 1],
"shots": 1024,
Expand All @@ -461,15 +491,15 @@ def test_local_and_global_meas() -> None:
with pytest.warns(UserWarning):
out = model(
model.params,
inputs=inputs,
inputs=test_case["inputs"],
noise_params=None,
cache=False,
execution_type=test_case["execution_type"],
)
else:
out = model(
model.params,
inputs=inputs,
inputs=test_case["inputs"],
noise_params=None,
cache=False,
execution_type=test_case["execution_type"],
Expand All @@ -486,17 +516,19 @@ def test_parity() -> None:
n_qubits=2,
n_layers=1,
circuit_type="Circuit_1",
output_qubit=[0, 1],
output_qubit=[0, 1], # parity
)
model_b = Model(
n_qubits=2,
n_layers=1,
circuit_type="Circuit_1",
output_qubit=-1,
output_qubit=-1, # individual
)

result_a = model_a(model_a.params, inputs=None, cache=False, force_mean=True)
result_b = model_b(model_a.params, inputs=None, cache=False) # use same params!
result_a = model_a(params=model_a.params, inputs=None, force_mean=True)
result_b = model_b(
params=model_a.params, inputs=None, force_mean=True
) # use same params!

assert not np.allclose(
result_a, result_b
Expand Down

0 comments on commit 7ca2ce9

Please sign in to comment.