diff --git a/docs/sample_page.md b/docs/sample_page.md index e0726f1..ffe0990 100644 --- a/docs/sample_page.md +++ b/docs/sample_page.md @@ -1,8 +1,6 @@ This is just a sample notebook to showcase the rendering of Jupyter notebooks in the documentation. ```python exec="on" source="material-block" session="main" -from qadence_libs.main import main - -msg = main() +msg = "To be updated" print(msg) ``` diff --git a/pyproject.toml b/pyproject.toml index 43829f6..31e6aed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,9 +9,10 @@ readme = "README.md" authors = [ { name = "Mario Dagrada", email = "mario.dagrada@pasqal.com" }, { name = "Roland Guichard", email = "roland.guichard@pasqal.com" }, - { name = "Raja Selvarajan", email = "raja.selvarajan@pasqal.com" }, - { name = "Gergana Velikova", email = "gergana.velikova@pasqal.com" }, - + { name = "João Moutinho", email = "joao.moutinho@pasqal.com" }, + { name = "Vincent Elfving", email = "vincent.elfving@pasqal.com" }, + { name = "Dominik Seitz", email = "dominik.seitz@pasqal.com" }, + { name = "Niklas Heim", email = "niklas.heim@pasqal.com" }, ] requires-python = ">=3.9,<3.12" license = {text = "Apache 2.0"} @@ -29,7 +30,6 @@ classifiers=[ ] dependencies = [ "torch", - "qutip==4.7.2", "qadence[braket, pulser, horqrux]==1.2.7", ] @@ -37,6 +37,11 @@ dependencies = [ allow-direct-references = true allow-ambiguous-features = true +[project.optional-dependencies] +visualization = [ + "graphviz", +] + [tool.hatch.envs.default] dependencies = [ "flaky", @@ -53,6 +58,8 @@ dependencies = [ "pydocstringformatter", ] +features = ["visualization"] + [tool.hatch.envs.default.scripts] test = "pytest -n auto --cov-report lcov --cov-config=pyproject.toml --cov=qadence_libs --cov=tests --ignore=./tests/test_examples.py {args}" test-examples = "pytest ./tests/test_examples.py {args}" diff --git a/qadence_libs/__init__.py b/qadence_libs/__init__.py index e69de29..fc3af24 100644 --- a/qadence_libs/__init__.py +++ b/qadence_libs/__init__.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from importlib import import_module + +from .constructors import * + +"""Fetch the functions defined in the __all__ of each sub-module. + +Import them into the qadence_libs namespace. Make sure each added submodule has the respective definition: + + - `__all__ = ["function0", "function1", ...]` + +Furthermore, add the submodule to the list below to automatically build +the __all__ of the qadence_libs namespace. Make sure to keep alphabetical ordering.
+""" + +list_of_submodules = [ + ".constructors", +] + +__all__ = [] +for submodule in list_of_submodules: + __all_submodule__ = getattr(import_module(submodule, package="qadence_libs"), "__all__") + __all__ += __all_submodule__ diff --git a/qadence_libs/constructors/__init__.py b/qadence_libs/constructors/__init__.py new file mode 100644 index 0000000..e95eef3 --- /dev/null +++ b/qadence_libs/constructors/__init__.py @@ -0,0 +1,29 @@ +# flake8: noqa + +from .feature_maps import ( + feature_map, + exp_fourier_feature_map, +) + +from .hea import hea + +from .iia import identity_initialized_ansatz + +from .rydberg_hea import rydberg_hea, rydberg_hea_layer +from .rydberg_feature_maps import rydberg_feature_map, analog_feature_map, rydberg_tower_feature_map + +from .qft import qft + +# Modules to be automatically added to the qadence namespace +__all__ = [ + "feature_map", + "exp_fourier_feature_map", + "hea", + "identity_initialized_ansatz", + "qft", + "rydberg_hea", + "rydberg_hea_layer", + "rydberg_feature_map", + "analog_feature_map", + "rydberg_tower_feature_map", +] diff --git a/qadence_libs/constructors/feature_maps.py b/qadence_libs/constructors/feature_maps.py new file mode 100644 index 0000000..9c4860e --- /dev/null +++ b/qadence_libs/constructors/feature_maps.py @@ -0,0 +1,233 @@ +from __future__ import annotations + +from collections.abc import Callable +from math import isclose +from typing import Union + +from qadence.blocks import AbstractBlock, KronBlock, chain, kron, tag +from qadence.logger import get_logger +from qadence.operations import PHASE, RX, RY, RZ, H +from qadence.parameters import FeatureParameter, Parameter, VariationalParameter +from qadence.types import PI, BasisSet, ReuploadScaling, TParameter +from sympy import Basic, acos + +logger = get_logger(__name__) + +ROTATIONS = [RX, RY, RZ, PHASE] +RotationTypes = type[Union[RX, RY, RZ, PHASE]] + + +def _set_range(fm_type: BasisSet | Callable | str) -> tuple[float, float]: + if fm_type == BasisSet.FOURIER: + return (0.0, 2 * PI) + elif fm_type == BasisSet.CHEBYSHEV: + return (-1.0, 1.0) + else: + return (0.0, 1.0) + + +RS_FUNC_DICT = { + ReuploadScaling.CONSTANT: lambda i: 1, + ReuploadScaling.TOWER: lambda i: float(i + 1), + ReuploadScaling.EXP: lambda i: float(2**i), +} + + +def fm_parameter_scaling( + fm_type: BasisSet | Callable | str, + param: Parameter | str = "phi", + feature_range: tuple[float, float] | None = None, + target_range: tuple[float, float] | None = None, +) -> Parameter | Basic: + if isinstance(param, Parameter): + fparam = param + fparam.trainable = False + else: + fparam = FeatureParameter(param) + + # Set feature and target range + feature_range = _set_range(fm_type) if feature_range is None else feature_range + target_range = _set_range(fm_type) if target_range is None else target_range + + # Rescale the feature parameter + scaling = (max(target_range) - min(target_range)) / (max(feature_range) - min(feature_range)) + shift = min(target_range) - min(feature_range) * scaling + + if isclose(scaling, 1.0): + # So we don't get 1.0 factor in visualization + scaled_fparam = fparam + shift + else: + scaled_fparam = scaling * fparam + shift + + return scaled_fparam + + +def fm_parameter_func(fm_type: BasisSet | Callable | str) -> Callable: + def ident_fn(x: TParameter) -> TParameter: + return x + + # Transform feature parameter + if fm_type == BasisSet.FOURIER: + transform_func = ident_fn + elif fm_type == BasisSet.CHEBYSHEV: + transform_func = acos + elif callable(fm_type): + transform_func = fm_type 
+ else: + raise NotImplementedError( + f"Feature map type {fm_type} not implemented. Choose an item from the BasisSet " + f"enum: {BasisSet.list()}, or a custom defined sympy function to wrap " + "the given feature parameter with." + ) + + return transform_func + + +def fm_reupload_scaling_fn( + reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT, +) -> tuple[Callable, str]: + # Set reupload scaling function + if callable(reupload_scaling): + rs_func = reupload_scaling + rs_tag = "Custom" + else: + rs_func = RS_FUNC_DICT.get(reupload_scaling, None) # type: ignore [call-overload] + if rs_func is None: + raise NotImplementedError( + f"Reupload scaling {reupload_scaling} not implemented; choose an item from " + f"the ReuploadScaling enum: {[rs.name for rs in ReuploadScaling]}, or your own " + "python function with a single int arg as input and int or float output." + ) + if isinstance(reupload_scaling, ReuploadScaling): + rs_tag = reupload_scaling.value + else: + rs_tag = reupload_scaling + + return rs_func, rs_tag + + +def feature_map( + n_qubits: int, + support: tuple[int, ...] | None = None, + param: Parameter | str = "phi", + op: RotationTypes = RX, + fm_type: BasisSet | Callable | str = BasisSet.FOURIER, + reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT, + feature_range: tuple[float, float] | None = None, + target_range: tuple[float, float] | None = None, + multiplier: Parameter | TParameter | None = None, + param_prefix: str | None = None, +) -> KronBlock: + """Construct a feature map of a given type. + + Arguments: + n_qubits: Number of qubits the feature map covers. Results in `support=range(n_qubits)`. + support: Puts one feature-encoding rotation gate on every qubit in `support`. n_qubits in + this case specifies the total overall qubits of the circuit, which may be wider than the + support itself, but not narrower. + param: Parameter of the feature map; you can pass a string or Parameter; + it will be set as non-trainable (FeatureParameter) regardless. + op: Rotation operation of the feature map; choose from RX, RY, RZ or PHASE. + fm_type: Basis set for data encoding; choose from `BasisSet.FOURIER` for Fourier + encoding, or `BasisSet.CHEBYSHEV` for Chebyshev polynomials of the first kind. + reupload_scaling: how the feature map scales the data that is re-uploaded for each qubit. + choose from `ReuploadScaling` enumeration or provide your own function with a single + int as input and int or float as output. + feature_range: range of data that the input data provided comes from. Used to map input data + to the correct domain of the feature-encoding function. + target_range: range of data the data encoder assumes as the natural range. For example, + in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2*PI). + Used to map data to the correct domain of the feature-encoding function. + multiplier: overall multiplier; this is useful for reuploading the feature map serially with + different scalings; can be a number or parameter/expression. + param_prefix: string prefix to create trainable parameters multiplying the feature parameter + inside the feature-encoding function. Note that currently this does not take into + account the domain of the feature-encoding function. 
+ + Example: + ```python exec="on" source="material-block" result="json" + from qadence_libs import feature_map, BasisSet, ReuploadScaling + + fm = feature_map(3, fm_type=BasisSet.FOURIER) + print(f"{fm = }") + + fm = feature_map(3, fm_type=BasisSet.CHEBYSHEV) + print(f"{fm = }") + + fm = feature_map(3, fm_type=BasisSet.FOURIER, reupload_scaling = ReuploadScaling.TOWER) + print(f"{fm = }") + ``` + """ + + # Process input + if support is None: + support = tuple(range(n_qubits)) + elif len(support) != n_qubits: + raise ValueError("Wrong qubit support supplied") + + if op not in ROTATIONS: + raise ValueError( + f"Operation {op} not supported. " + f"Please provide one from {[rot.__name__ for rot in ROTATIONS]}." + ) + + scaled_fparam = fm_parameter_scaling( + fm_type, param, feature_range=feature_range, target_range=target_range + ) + + transform_func = fm_parameter_func(fm_type) + + basis_tag = fm_type.value if isinstance(fm_type, BasisSet) else str(fm_type) + rs_func, rs_tag = fm_reupload_scaling_fn(reupload_scaling) + + # Set overall multiplier + multiplier = 1 if multiplier is None else Parameter(multiplier) + + # Build feature map + op_list = [] + fparam = scaled_fparam + for i, qubit in enumerate(support): + if param_prefix is not None: + train_param = VariationalParameter(param_prefix + f"_{i}") + fparam = train_param * scaled_fparam + op_list.append(op(qubit, multiplier * rs_func(i) * transform_func(fparam))) + fm = kron(*op_list) + + fm.tag = rs_tag + " " + basis_tag + " FM" + + return fm + + +def exp_fourier_feature_map( + n_qubits: int, + support: tuple[int, ...] = None, + param: str = "x", + feature_range: tuple[float, float] = None, +) -> AbstractBlock: + """ + Exponential fourier feature map. + + Args: + n_qubits: number of qubits in the feature + support: qubit support + param: name of feature `Parameter` + feature_range: min and max value of the feature, as floats in a Tuple + """ + + if feature_range is None: + feature_range = (0.0, 2.0**n_qubits) + + support = tuple(range(n_qubits)) if support is None else support + hlayer = kron(H(qubit) for qubit in support) + rlayer = feature_map( + n_qubits, + support=support, + param=param, + op=RZ, + fm_type=BasisSet.FOURIER, + reupload_scaling=ReuploadScaling.EXP, + feature_range=feature_range, + target_range=(0.0, 2 * PI), + ) + rlayer.tag = None + return tag(chain(hlayer, rlayer), f"ExpFourierFM({param})") diff --git a/qadence_libs/constructors/hea.py b/qadence_libs/constructors/hea.py new file mode 100644 index 0000000..2509c1e --- /dev/null +++ b/qadence_libs/constructors/hea.py @@ -0,0 +1,321 @@ +from __future__ import annotations + +import itertools +from typing import Any, Type, Union + +from qadence.blocks import AbstractBlock, block_is_qubit_hamiltonian, chain, kron, tag +from qadence.constructors.hamiltonians import hamiltonian_factory +from qadence.operations import CNOT, CPHASE, CRX, CRY, CRZ, CZ, RX, RY, HamEvo +from qadence.types import Interaction, Strategy + +DigitalEntanglers = Union[CNOT, CZ, CRZ, CRY, CRX] + + +def hea( + n_qubits: int, + depth: int = 1, + param_prefix: str = "theta", + support: tuple[int, ...] = None, + strategy: Strategy = Strategy.DIGITAL, + **strategy_args: Any, +) -> AbstractBlock: + """ + Factory function for the Hardware Efficient Ansatz (HEA). 
+ + Args: + n_qubits: number of qubits in the block + depth: number of layers of the HEA + param_prefix: the base name of the variational parameters + support: qubit indexes where the HEA is applied + strategy: Strategy.Digital or Strategy.DigitalAnalog + **strategy_args: see below + + Keyword Arguments: + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. Valid for + Digital and DigitalAnalog HEA. + periodic (bool): if the qubits should be linked periodically. + periodic=False is not supported in emu-c. Valid only + for Digital HEA. + entangler (AbstractBlock): + - Digital: 2-qubit entangling operation. Supports CNOT, CZ, + CRX, CRY, CRZ, CPHASE. Controlled rotations will have variational + parameters on the rotation angles. + - DigitalAnalog | Analog: Hamiltonian generator for the + analog entangling layer. Defaults to global ZZ Hamiltonian. + Time parameter is considered variational. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence import RZ, RX + from qadence_libs import hea + + # create the circuit + n_qubits, depth = 2, 4 + ansatz = hea( + n_qubits=n_qubits, + depth=depth, + strategy="sDAQC", + operations=[RZ,RX,RZ] + ) + ``` + """ + + if support is None: + support = tuple(range(n_qubits)) + + hea_func_dict = { + Strategy.DIGITAL: hea_digital, + Strategy.SDAQC: hea_sDAQC, + Strategy.BDAQC: hea_bDAQC, + Strategy.ANALOG: hea_analog, + } + + try: + hea_func = hea_func_dict[strategy] + except KeyError: + raise KeyError(f"Strategy {strategy} not recognized.") + + hea_block: AbstractBlock = hea_func( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + **strategy_args, + ) # type: ignore + + return hea_block + + +############# +## DIGITAL ## +############# + + +def _rotations_digital( + n_qubits: int, + depth: int, + param_prefix: str = "theta", + support: tuple[int, ...] = None, + operations: list[Type[AbstractBlock]] = [RX, RY, RX], +) -> list[AbstractBlock]: + """Creates the layers of single-qubit rotations in an HEA.""" + if support is None: + support = tuple(range(n_qubits)) + iterator = itertools.count() + rot_list: list[AbstractBlock] = [] + for d in range(depth): + rots = [ + kron( + gate(support[n], param_prefix + f"_{next(iterator)}") # type: ignore [arg-type] + for n in range(n_qubits) + ) + for gate in operations + ] + rot_list.append(chain(*rots)) + return rot_list + + +def _entangler( + control: int, + target: int, + param_str: str, + op: Type[DigitalEntanglers] = CNOT, +) -> AbstractBlock: + if op in [CNOT, CZ]: + return op(control, target) # type: ignore + elif op in [CRZ, CRY, CRX, CPHASE]: + return op(control, target, param_str) # type: ignore + else: + raise ValueError("Provided entangler not accepted for digital HEA.") + + +def _entanglers_digital( + n_qubits: int, + depth: int, + param_prefix: str = "theta", + support: tuple[int, ...] 
= None, + periodic: bool = False, + entangler: Type[DigitalEntanglers] = CNOT, +) -> list[AbstractBlock]: + """Creates the layers of digital entangling operations in an HEA.""" + if support is None: + support = tuple(range(n_qubits)) + iterator = itertools.count() + ent_list: list[AbstractBlock] = [] + for d in range(depth): + ents = [] + ents.append( + kron( + _entangler( + control=support[n], + target=support[n + 1], + param_str=param_prefix + f"_ent_{next(iterator)}", + op=entangler, + ) + for n in range(n_qubits) + if not n % 2 and n < n_qubits - 1 + ) + ) + if n_qubits > 2: + ents.append( + kron( + _entangler( + control=support[n], + target=support[(n + 1) % n_qubits], + param_str=param_prefix + f"_ent_{next(iterator)}", + op=entangler, + ) + for n in range(n_qubits - (not periodic)) + if n % 2 + ) + ) + ent_list.append(chain(*ents)) + return ent_list + + +def hea_digital( + n_qubits: int, + depth: int = 1, + param_prefix: str = "theta", + periodic: bool = False, + operations: list[type[AbstractBlock]] = [RX, RY, RX], + support: tuple[int, ...] = None, + entangler: Type[DigitalEntanglers] = CNOT, +) -> AbstractBlock: + """ + Construct the Digital Hardware Efficient Ansatz (HEA). + + Args: + n_qubits (int): number of qubits in the block. + depth (int): number of layers of the HEA. + param_prefix (str): the base name of the variational parameters + periodic (bool): if the qubits should be linked periodically. + periodic=False is not supported in emu-c. + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. + support (tuple): qubit indexes where the HEA is applied. + entangler (AbstractBlock): 2-qubit entangling operation. + Supports CNOT, CZ, CRX, CRY, CRZ, CPHASE. Controlled rotations + will have variational parameters on the rotation angles. + """ + try: + if entangler not in [CNOT, CZ, CRX, CRY, CRZ, CPHASE]: + raise ValueError( + "Please provide a valid two-qubit entangler operation for digital HEA." + ) + except TypeError: + raise ValueError("Please provide a valid two-qubit entangler operation for digital HEA.") + + rot_list = _rotations_digital( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + operations=operations, + ) + + ent_list = _entanglers_digital( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + periodic=periodic, + entangler=entangler, + ) + + layers = [] + for d in range(depth): + layers.append(rot_list[d]) + layers.append(ent_list[d]) + return tag(chain(*layers), "HEA") + + +########### +## sDAQC ## +########### + + +def _entanglers_analog( + depth: int, + param_prefix: str = "theta", + entangler: AbstractBlock | None = None, +) -> list[AbstractBlock]: + return [HamEvo(entangler, param_prefix + f"_t_{d}") for d in range(depth)] + + +def hea_sDAQC( + n_qubits: int, + depth: int = 1, + param_prefix: str = "theta", + operations: list[type[AbstractBlock]] = [RX, RY, RX], + support: tuple[int, ...] = None, + entangler: AbstractBlock | None = None, +) -> AbstractBlock: + """ + Construct the Hardware Efficient Ansatz (HEA) with analog entangling layers. + + It uses step-wise digital-analog computation. + + Args: + n_qubits (int): number of qubits in the block. + depth (int): number of layers of the HEA. + param_prefix (str): the base name of the variational parameters + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. + support (tuple): qubit indexes where the HEA is applied. 
+ entangler (AbstractBlock): Hamiltonian generator for the + analog entangling layer. Defaults to global ZZ Hamiltonian. + Time parameter is considered variational. + """ + + # TODO: Add qubit support + if entangler is None: + entangler = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + try: + if not block_is_qubit_hamiltonian(entangler): + raise ValueError( + "Please provide a valid Pauli Hamiltonian generator for digital-analog HEA." + ) + except NotImplementedError: + raise ValueError( + "Please provide a valid Pauli Hamiltonian generator for digital-analog HEA." + ) + + rot_list = _rotations_digital( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + operations=operations, + ) + + ent_list = _entanglers_analog( + depth=depth, + param_prefix=param_prefix, + entangler=entangler, + ) + + layers = [] + for d in range(depth): + layers.append(rot_list[d]) + layers.append(ent_list[d]) + return tag(chain(*layers), "HEA-sDA") + + +########### +## bDAQC ## +########### + + +def hea_bDAQC(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError + + +############ +## ANALOG ## +############ + + +def hea_analog(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError diff --git a/qadence_libs/constructors/iia.py b/qadence_libs/constructors/iia.py new file mode 100644 index 0000000..b5c5def --- /dev/null +++ b/qadence_libs/constructors/iia.py @@ -0,0 +1,213 @@ +from __future__ import annotations + +from typing import Any, Type, Union + +import torch +from qadence.blocks import AbstractBlock, KronBlock, block_is_qubit_hamiltonian, chain, kron, tag +from qadence.constructors.hamiltonians import hamiltonian_factory +from qadence.operations import CNOT, CPHASE, CRX, CRY, CRZ, CZ, RX, RY, HamEvo +from qadence.parameters import Parameter +from qadence.types import PI, Interaction, Strategy + +DigitalEntanglers = Union[CNOT, CZ, CRZ, CRY, CRX, CPHASE] + + +def _entangler( + control: int, + target: int, + param_str: str, + entangler: Type[DigitalEntanglers] = CNOT, +) -> AbstractBlock: + if entangler in [CNOT, CZ]: + return entangler(control, target) # type: ignore + elif entangler in [CRZ, CRY, CRX, CPHASE]: + param = Parameter(param_str, value=0.0, trainable=True) + return entangler(control, target, param) # type: ignore + else: + raise ValueError("Provided entangler not accepted for digital ansatz.") + + +def _entangler_analog( + param_str: str, + generator: AbstractBlock | None = None, +) -> AbstractBlock: + param = Parameter(name=param_str, value=0.0, trainable=True) + return HamEvo(generator=generator, parameter=param) + + +def _rotations( + n_qubits: int, + layer: int, + side: str, + param_str: str, + values: list[float | torch.Tensor], + ops: list[type[AbstractBlock]] = [RX, RY], +) -> list[KronBlock]: + if side == "left": + idx = lambda x: x # noqa: E731 + elif side == "right": + idx = lambda x: len(ops) - x - 1 # noqa: E731 + ops = list(reversed(ops)) + else: + raise ValueError("Please provide either 'left' or 'right'") + + rot_list = [] + for i, gate in enumerate(ops): + rot_list.append( + kron( + gate( + target=n, # type: ignore [call-arg] + parameter=Parameter( # type: ignore [call-arg] + name=param_str + f"_{layer}{n + n_qubits * idx(i)}", + value=values[n + n_qubits * idx(i)], + trainable=True, + ), + ) + for n in range(n_qubits) + ) + ) + + return rot_list + + +def identity_initialized_ansatz( + n_qubits: int, + depth: int = 1, + param_prefix: str = "iia", + strategy: Strategy = Strategy.DIGITAL, + rotations: Any = [RX, RY], + 
entangler: Any = None, + periodic: bool = False, +) -> AbstractBlock: + """ + Identity block for barren plateau mitigation. + + The initial configuration of this block is equal to an identity unitary + but can be trained in the same fashion as other ansatzes, reaching the same level + of expressivity. + + Args: + n_qubits: number of qubits in the block + depth: number of layers of the ansatz + param_prefix (str): + The base name of the variational parameter. Defaults to "iia". + strategy: (Strategy) + Strategy.DIGITAL for fully digital or Strategy.SDAQC for digital-analog. + rotations (list of AbstractBlocks): + single-qubit rotations with trainable parameters + entangler (AbstractBlock): + For Digital: + 2-qubit entangling operation. Supports CNOT, CZ, CRX, CRY, CRZ, CPHASE. + Controlled rotations will have variational parameters on the rotation angles. + Defaults to CNOT. + For Digital-analog: + Hamiltonian generator for the analog entangling layer. + Time parameter is considered variational. + Defaults to a global NN Hamiltonian. + periodic (bool): if the qubits should be linked periodically. Valid only for digital. + """ + initialized_layers = [] + for layer in range(depth): + alpha = 2 * PI * torch.rand(n_qubits * len(rotations)) + gamma = torch.zeros(n_qubits) + beta = -alpha + + left_rotations = _rotations( + n_qubits=n_qubits, + layer=layer, + side="left", + param_str=f"{param_prefix}_α", + values=alpha, + ops=rotations, + ) + + if strategy == Strategy.DIGITAL: + if entangler is None: + entangler = CNOT + + if entangler not in [CNOT, CZ, CRZ, CRY, CRX, CPHASE]: + raise ValueError( + "Please provide a valid two-qubit entangler operation for digital IIA." + ) + + ent_param_prefix = f"{param_prefix}_θ_ent_" + if not periodic: + left_entanglers = [ + chain( + _entangler( + control=n, + target=n + 1, + param_str=ent_param_prefix + f"_{layer}{n}", + entangler=entangler, + ) + for n in range(n_qubits - 1) + ) + ] + else: + left_entanglers = [ + chain( + _entangler( + control=n, + target=(n + 1) % n_qubits, + param_str=ent_param_prefix + f"_{layer}{n}", + entangler=entangler, + ) + for n in range(n_qubits) + ) + ] + + elif strategy == Strategy.SDAQC: + if entangler is None: + entangler = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + + if not block_is_qubit_hamiltonian(entangler): + raise ValueError( + "Please provide a valid Pauli Hamiltonian generator for digital-analog IIA." 
+ ) + + ent_param_prefix = f"{param_prefix}_ent_t" + + left_entanglers = [ + chain( + _entangler_analog( + param_str=f"{ent_param_prefix}_{layer}", + generator=entangler, + ) + ) + ] + + else: + raise NotImplementedError + + centre_rotations = [ + kron( + RX( + target=n, + parameter=Parameter(name=f"{param_prefix}_γ" + f"_{layer}{n}", value=gamma[n]), + ) + for n in range(n_qubits) + ) + ] + + right_entanglers = reversed(*left_entanglers) + + right_rotations = _rotations( + n_qubits=n_qubits, + layer=layer, + side="right", + param_str=f"{param_prefix}_β", + values=beta, + ops=rotations, + ) + + krons = [ + *left_rotations, + *left_entanglers, + *centre_rotations, + *right_entanglers, + *right_rotations, + ] + + initialized_layers.append(tag(chain(*krons), tag=f"BPMA-{layer}")) + + return chain(*initialized_layers) diff --git a/qadence_libs/constructors/qft.py b/qadence_libs/constructors/qft.py new file mode 100644 index 0000000..cbf55f3 --- /dev/null +++ b/qadence_libs/constructors/qft.py @@ -0,0 +1,249 @@ +from __future__ import annotations + +from typing import Any + +import torch +from qadence.blocks import AbstractBlock, add, chain, kron, tag +from qadence.constructors import hamiltonian_factory +from qadence.constructors.daqc import daqc_transform +from qadence.operations import CPHASE, SWAP, H, HamEvo, I, Z +from qadence.types import PI, Interaction, Strategy + + +def qft( + n_qubits: int, + support: tuple[int, ...] = None, + inverse: bool = False, + reverse_in: bool = False, + swaps_out: bool = False, + strategy: Strategy = Strategy.DIGITAL, + gen_build: AbstractBlock | None = None, +) -> AbstractBlock: + """ + The Quantum Fourier Transform. + + Depending on the application, user should be careful with qubit ordering + in the input and output. This can be controlled with reverse_in and swaps_out + arguments. + + Args: + n_qubits: number of qubits in the QFT + support: qubit support to use + inverse: True performs the inverse QFT + reverse_in: Reverses the input qubits to account for endianness + swaps_out: Performs swaps on the output qubits to match the "textbook" QFT. + strategy: Strategy.Digital or Strategy.sDAQC + gen_build: building block Ising Hamiltonian for the DAQC transform. + Defaults to constant all-to-all Ising. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence_libs import qft + + n_qubits = 3 + + qft_circuit = qft(n_qubits, strategy = "sDAQC") + ``` + """ + + if support is None: + support = tuple(range(n_qubits)) + + assert len(support) <= n_qubits, "Wrong qubit support supplied" + + if reverse_in: + support = support[::-1] + + qft_layer_dict = { + Strategy.DIGITAL: _qft_layer_digital, + Strategy.SDAQC: _qft_layer_sDAQC, + Strategy.BDAQC: _qft_layer_bDAQC, + Strategy.ANALOG: _qft_layer_analog, + } + + try: + layer_func = qft_layer_dict[strategy] + except KeyError: + raise KeyError(f"Strategy {strategy} not recognized.") + + qft_layers = reversed(range(n_qubits)) if inverse else range(n_qubits) + + qft_circ = chain( + layer_func( + n_qubits=n_qubits, support=support, layer=layer, inverse=inverse, gen_build=gen_build + ) # type: ignore + for layer in qft_layers + ) + + if swaps_out: + swap_ops = [SWAP(support[i], support[n_qubits - i - 1]) for i in range(n_qubits // 2)] + qft_circ = chain(*swap_ops, qft_circ) if inverse else chain(qft_circ, *swap_ops) + + return tag(qft_circ, tag="iQFT") if inverse else tag(qft_circ, tag="QFT") + + +######################## +# STANDARD DIGITAL QFT # +######################## + + +def _qft_layer_digital( + n_qubits: int, + support: tuple[int, ...], + layer: int, + inverse: bool, + gen_build: AbstractBlock | None = None, +) -> AbstractBlock: + """ + Apply the Hadamard gate followed by CPHASE gates. + + This corresponds to one layer of the QFT. + """ + qubit_range_layer = ( + reversed(range(layer + 1, n_qubits)) if inverse else range(layer + 1, n_qubits) + ) + rots = [] + for j in qubit_range_layer: + angle = torch.tensor( + ((-1) ** inverse) * 2 * PI / (2 ** (j - layer + 1)), dtype=torch.cdouble + ) + rots.append(CPHASE(support[j], support[layer], angle)) # type: ignore + if inverse: + return chain(*rots, H(support[layer])) # type: ignore + return chain(H(support[layer]), *rots) # type: ignore + + +######################################## +# DIGITAL-ANALOG QFT (with sDAQC) # +# [1] https://arxiv.org/abs/1906.07635 # +######################################## + + +def _theta(k: int) -> float: + """Equation (16) from [1].""" + return float(PI / (2 ** (k + 1))) + + +def _alpha(c: int, m: int, k: int) -> float: + """Equation (16) from [1].""" + if c == m: + return float(PI / (2 ** (k - m + 2))) + else: + return 0.0 + + +def _sqg_gen(n_qubits: int, support: tuple[int, ...], m: int, inverse: bool) -> list[AbstractBlock]: + """Equation (13) from [1]. + + Creates the generator corresponding to single-qubit rotations coming + out of the CPHASE decomposition. The paper also includes the generator + for the Hadamard of each layer here, but we left it explicit at + the start of each layer. + """ + k_sqg_list = reversed(range(2, n_qubits - m + 2)) if inverse else range(2, n_qubits - m + 2) + + sqg_gen_list = [] + for k in k_sqg_list: + sqg_gen = ( + kron(I(support[j]) for j in range(n_qubits)) - Z(support[k + m - 2]) - Z(support[m - 1]) + ) + sqg_gen_list.append(_theta(k) * sqg_gen) + + return sqg_gen_list + + +def _tqg_gen(n_qubits: int, support: tuple[int, ...], m: int, inverse: bool) -> list[AbstractBlock]: + """Equation (14) from [1]. + + Creates the generator corresponding to the two-qubit ZZ + interactions coming out of the CPHASE decomposition. 
+ """ + k_tqg_list = reversed(range(2, n_qubits + 1)) if inverse else range(2, n_qubits + 1) + + tqg_gen_list = [] + for k in k_tqg_list: + for c in range(1, k): + tqg_gen = kron(Z(support[c - 1]), Z(support[k - 1])) + tqg_gen_list.append(_alpha(c, m, k) * tqg_gen) + + return tqg_gen_list + + +def _qft_layer_sDAQC( + n_qubits: int, + support: tuple[int, ...], + layer: int, + inverse: bool, + gen_build: AbstractBlock | None, +) -> AbstractBlock: + """ + QFT Layer using the sDAQC technique. + + Following the paper: + + -- [1] https://arxiv.org/abs/1906.07635 + + 4 - qubit edge case is not implemented. + + Note: the paper follows an index convention of running from 1 to N. A few functions + here also use that convention to be consistent with the paper. However, for qadence + related things the indices are converted to [0, N-1]. + """ + + # TODO: Properly check and include support for changing qubit support + allowed_support = tuple(range(n_qubits)) + if support != allowed_support and support != allowed_support[::-1]: + raise NotImplementedError("Changing support for DigitalAnalog QFT not yet supported.") + + if gen_build is None: + gen_build = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + + m = layer + 1 # Paper index convention + + # Generator for the single-qubit rotations contributing to the CPHASE gate + sqg_gen_list = _sqg_gen(n_qubits=n_qubits, support=support, m=m, inverse=inverse) + + # Ising model representing the CPHASE gates two-qubit interactions + tqg_gen_list = _tqg_gen(n_qubits=n_qubits, support=support, m=m, inverse=inverse) + + if len(sqg_gen_list) > 0: + # Single-qubit rotations (leaving the Hadamard explicit) + sq_gate = chain(H(support[m - 1]), HamEvo(add(*sqg_gen_list), -1.0)) + + # Two-qubit interaction in the CPHASE converted with sDAQC + gen_cphases = add(*tqg_gen_list) + transformed_daqc_circuit = daqc_transform( + n_qubits=gen_build.n_qubits, + gen_target=gen_cphases, + t_f=-1.0, + gen_build=gen_build, + ) + + layer_circ = chain( + sq_gate, + transformed_daqc_circuit, + ) + if inverse: + return layer_circ.dagger() + return layer_circ # type: ignore + else: + return chain(H(support[m - 1])) # type: ignore + + +######################################## +# DIGITAL-ANALOG QFT (with bDAQC) # +# [1] https://arxiv.org/abs/1906.07635 # +######################################## + + +def _qft_layer_bDAQC(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError + + +############ +## ANALOG ## +############ + + +def _qft_layer_analog(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError diff --git a/qadence_libs/constructors/rydberg_feature_maps.py b/qadence_libs/constructors/rydberg_feature_maps.py new file mode 100644 index 0000000..577e8cf --- /dev/null +++ b/qadence_libs/constructors/rydberg_feature_maps.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +from typing import Callable + +import numpy as np +from qadence.blocks import AnalogBlock, KronBlock, kron +from qadence.logger import get_logger +from qadence.operations import AnalogRot, AnalogRX, AnalogRY, AnalogRZ +from qadence.parameters import FeatureParameter, Parameter, VariationalParameter +from qadence.types import PI, BasisSet, ReuploadScaling, TParameter +from sympy import Basic + +from qadence_libs.constructors.feature_maps import fm_parameter_func, fm_parameter_scaling + +logger = get_logger(__file__) + +AnalogRotationTypes = [AnalogRX, AnalogRY, AnalogRZ] + + +def rydberg_feature_map( + n_qubits: int, + param: str = "phi", + max_abs_detuning: float = 2 * PI * 10, + weights: 
list[float] | None = None, +) -> KronBlock: + """Feature map using semi-local addressing patterns. + + If no weights are specified, variational parameters are created + for the pattern. + + Args: + n_qubits (int): number of qubits + param: the name of the feature parameter + max_abs_detuning: maximum value of absolute detuning for each qubit. Defaults to 10 MHz. + weights: a list of weights to assign to each qubit parameter in the feature map + + Returns: + The block representing the feature map + """ + + tower_coeffs: list[float | Parameter] + tower_coeffs = ( + [VariationalParameter(f"w_{param}_{i}") for i in range(n_qubits)] + if weights is None + else weights + ) + tower_detuning = max_abs_detuning / (sum(tower_coeffs[i] for i in range(n_qubits))) + + param = FeatureParameter(param) + duration = 1000 * param / tower_detuning # type: ignore [operator] + return kron( + AnalogRot( + duration=duration, + delta=-tower_detuning * tower_coeffs[i], + phase=0.0, + qubit_support=(i,), + ) + for i in range(n_qubits) + ) + + +def rydberg_tower_feature_map( + n_qubits: int, param: str = "phi", max_abs_detuning: float = 2 * PI * 10 +) -> KronBlock: + weights = list(np.arange(1, n_qubits + 1)) + return rydberg_feature_map( + n_qubits, param=param, max_abs_detuning=max_abs_detuning, weights=weights + ) + + +def analog_feature_map( + param: str = "phi", + op: Callable[[Parameter | Basic], AnalogBlock] = AnalogRX, + fm_type: BasisSet | Callable | str = BasisSet.FOURIER, + reupload_scaling: ReuploadScaling | Callable | str = ReuploadScaling.CONSTANT, + feature_range: tuple[float, float] | None = None, + target_range: tuple[float, float] | None = None, + multiplier: Parameter | TParameter | None = None, +) -> AnalogBlock: + """Generate a fully analog feature map. + + Args: + param: Parameter of the feature map; you can pass a string or Parameter; + it will be set as non-trainable (FeatureParameter) regardless. + op: type of operation. Choose among AnalogRX, AnalogRY, AnalogRZ or a custom + callable function returning an AnalogBlock instance + fm_type: Basis set for data encoding; choose from `BasisSet.FOURIER` for Fourier + encoding, or `BasisSet.CHEBYSHEV` for Chebyshev polynomials of the first kind. + reupload_scaling: how the feature map scales the data that is re-uploaded. Given that + this feature map uses analog rotations, the reuploading works by simply + adding additional operations with different scaling factors in the parameter. + Choose from `ReuploadScaling` enumeration, currently only CONSTANT works, + or provide your own function with the first argument being the given + operation `op` and the second argument the feature parameter + feature_range: range of data that the input data is assumed to come from. + target_range: range of data the data encoder assumes as the natural range. For example, + in Chebyshev polynomials it is (-1, 1), while for Fourier it may be chosen as (0, 2*PI). + multiplier: overall multiplier; this is useful for reuploading the feature map serially with + different scalings; can be a number or parameter/expression. 
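    Example: a minimal sketch using the constructor defaults (the `AnalogRX` operation
    and a feature parameter named "phi"):
    ```python
    from qadence.operations import AnalogRX
    from qadence.types import BasisSet

    from qadence_libs import analog_feature_map

    # Fourier-encoded analog rotation on the feature parameter "phi"
    fm = analog_feature_map(param="phi", op=AnalogRX, fm_type=BasisSet.FOURIER)
    print(f"{fm = }")
    ```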
+ """ + + scaled_fparam = fm_parameter_scaling( + fm_type, param, feature_range=feature_range, target_range=target_range + ) + + transform_func = fm_parameter_func(fm_type) + + transformed_feature = transform_func(scaled_fparam) + + multiplier = 1.0 if multiplier is None else Parameter(multiplier) + + if callable(reupload_scaling): + return reupload_scaling(op, multiplier * transformed_feature) # type: ignore[no-any-return] + elif reupload_scaling == ReuploadScaling.CONSTANT: + return op(multiplier * transformed_feature) + # TODO: implement tower scaling by reuploading multiple times + # using different analog rotations + else: + raise NotImplementedError(f"Reupload scaling {str(reupload_scaling)} not implemented!") diff --git a/qadence_libs/constructors/rydberg_hea.py b/qadence_libs/constructors/rydberg_hea.py new file mode 100644 index 0000000..7cbbbc8 --- /dev/null +++ b/qadence_libs/constructors/rydberg_hea.py @@ -0,0 +1,190 @@ +from __future__ import annotations + +from typing import Type, Union + +import qadence as qd +import sympy +from qadence.blocks import AddBlock, ChainBlock, add, chain +from qadence.constructors import hamiltonian_factory +from qadence.operations import N, X, Y, Z +from qadence.parameters import Parameter, VariationalParameter + +TPauliOp = Union[Type[X], Type[Y], Type[Z], Type[N]] + + +def _amplitude_map( + n_qubits: int, + pauli_op: TPauliOp, + weights: list[Parameter] | list[float] | None = None, +) -> AddBlock: + """Create a generator equivalent to a laser amplitude mapping on the device. + + Given a certain quantum operation `pauli_op`, this routine constructs + the following generator: + + H = sum_i^N w_i OP(i) + + where the weights are variational parameters. + + Args: + n_qubits: number of qubits + pauli_op: type of Pauli operation to use when creating the generator + weights: list of variational parameters with the weights + + Returns: + A block with the Hamiltonian generator + """ + if weights is None: + return add(pauli_op(j) for j in range(n_qubits)) + else: + assert len(weights) <= n_qubits, "Wrong weights supplied" + return add(w * pauli_op(j) for j, w in enumerate(weights)) # type:ignore [operator] + + +def rydberg_hea_layer( + register: qd.Register, + tevo_drive: Parameter | float, + tevo_det: Parameter | float, + tevo_wait: Parameter | float, + phase: Parameter | float | None = None, + detunings: list[Parameter] | list[float] | None = None, + drives: list[Parameter] | list[float] | None = None, + drive_scaling: float = 1.0, +) -> ChainBlock: + """A single layer of the Rydberg hardware efficient ansatz. + + Args: + register: the input register with atomic coordinates needed to build the interaction. + tevo_drive: a variational parameter for the duration of the drive term of + the Hamiltonian generator, including optional semi-local addressing + tevo_det: a variational parameter for the duration of the detuning term of the + Hamiltonian generator, including optional semi-local addressing + tevo_wait: a variational parameter for the duration of the waiting + time with interaction only + phase: a variational parameter representing the global phase. If None, the + global phase is set to 0 which results in a drive term in sigma^x only. Otherwise + both sigma^x and sigma^y terms will be present + detunings: a list of parameters with the weights of the locally addressed + detuning terms. These are variational parameters which are tuned by the optimizer + drives: a list of parameters with the weights of the locally addressed + drive terms. 
These are variational parameters which are tuned by the optimizer + drive_scaling: a scaling factor applied to the drive Hamiltonian generator + + Returns: + A block with a single layer of Rydberg HEA + """ + n_qubits = register.n_qubits + + drive_x = _amplitude_map(n_qubits, qd.X, weights=drives) + drive_y = _amplitude_map(n_qubits, qd.Y, weights=drives) + detuning = _amplitude_map(n_qubits, qd.N, weights=detunings) + interaction = hamiltonian_factory(register, qd.Interaction.NN) + + # drive and interaction are not commuting thus they need to be + # added directly into the final Hamiltonian generator + if phase is not None: + generator = ( + drive_scaling * sympy.cos(phase) * drive_x + - drive_scaling * sympy.sin(phase) * drive_y + + interaction + ) + else: + generator = drive_scaling * drive_x + interaction + + return chain( + qd.HamEvo(generator, tevo_drive), + # detuning and interaction are commuting, so they + # can be ordered arbitrarily and treated separately + qd.HamEvo(interaction, tevo_wait), + qd.HamEvo(detuning, tevo_det), + ) + + +def rydberg_hea( + register: qd.Register, + n_layers: int = 1, + addressable_detuning: bool = True, + addressable_drive: bool = False, + tunable_phase: bool = False, + additional_prefix: str = None, +) -> qd.blocks.ChainBlock: + """Hardware efficient ansatz for neutral atom (Rydberg) platforms. + + This constructor implements a variational ansatz which is very close to + what is implementable on 2nd generation PASQAL quantum devices. In particular, + it implements evolution over a specific Hamiltonian which can be realized on + the device. This Hamiltonian contains: + + * an interaction term given by the standard NN interaction and determined starting + from the positions in the input register: Hᵢₙₜ = ∑ᵢⱼ C₆/rᵢⱼ⁶ nᵢnⱼ + + * a detuning term which corresponds to an n_i = (1+sigma_i^z)/2 operator applied to + all the qubits. If the `addressable_detuning` flag is set to True, the routine + effectively adds a local n_i = (1+sigma_i^z)/2 term in the + evolved Hamiltonian with a different coefficient for each atom. These + coefficients determine a local addressing pattern for the detuning on a subset + of the qubits. In this routine, the coefficients are variational parameters + and they will therefore be optimized at each optimizer step + + * a drive term which corresponds to a sigma^x evolution operation applied to + all the qubits. If the `addressable_drive` flag is set to True, the routine + effectively adds a local sigma_i^x term in the evolved Hamiltonian with a different + coefficient for each atom. These coefficients determine a local addressing pattern + for the drive on a subset of the qubits. In this routine, the coefficients are + variational parameters and they will therefore be optimized at each optimizer step + + * if the `tunable_phase` flag is set to True, the drive term is modified in the following + way: drive = cos(phi) * sigma^x - sin(phi) * sigma^y + The addressable pattern above is maintained and the phase is considered just as an + additional variational parameter which is optimized with the rest + + Notice that, on real devices, the coefficients assigned to each qubit in both the detuning + and drive patterns should be non-negative and they should always sum to 1. This is not the + case for the implementation in this routine since the coefficients (weights) do not have any + constraint. Therefore, this HEA is not completely realizable on neutral atom devices. + + Args: + register: the input atomic register with Cartesian coordinates. 
+ n_layers: number of layers in the HEA; each layer includes drive, detuning and + pure-interaction pulses whose durations are variational parameters + addressable_detuning: whether to turn on the trainable semi-local addressing pattern + on the detuning (n_i terms in the Hamiltonian) + addressable_drive: whether to turn on the trainable semi-local addressing pattern + on the drive (sigma_i^x terms in the Hamiltonian) + tunable_phase: whether to have a tunable phase to get both sigma^x and sigma^y rotations + in the drive term. If False, only a sigma^x term will be included in the drive part + of the Hamiltonian generator + additional_prefix: an additional prefix to attach to the parameter names + + Returns: + The Rydberg HEA block + """ + n_qubits = register.n_qubits + prefix = "" if additional_prefix is None else "_" + additional_prefix + + detunings = None + # add a detuning pattern locally addressing the atoms + if addressable_detuning: + detunings = [qd.VariationalParameter(f"detmap_{j}") for j in range(n_qubits)] + + drives = None + # add a drive pattern locally addressing the atoms + if addressable_drive: + drives = [qd.VariationalParameter(f"drivemap_{j}") for j in range(n_qubits)] + + phase = None + if tunable_phase: + phase = qd.VariationalParameter("phase") + + return chain( + rydberg_hea_layer( + register, + VariationalParameter(f"At{prefix}_{layer}"), + VariationalParameter(f"Omega{prefix}_{layer}"), + VariationalParameter(f"wait{prefix}_{layer}"), + detunings=detunings, + drives=drives, + phase=phase, + ) + for layer in range(n_layers) + ) diff --git a/qadence_libs/constructors/utils.py b/qadence_libs/constructors/utils.py new file mode 100644 index 0000000..9d48db4 --- /dev/null +++ b/qadence_libs/constructors/utils.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/qadence_libs/main.py b/qadence_libs/main.py deleted file mode 100644 index 6e12d88..0000000 --- a/qadence_libs/main.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - -from typing import Optional - - -def main(str_to_add: Optional[str] = None) -> str: - msg = "Pasqal template Python project" - if str_to_add is not None: - msg += str_to_add - return msg - - -if __name__ == "__main__": - msg = main(str_to_add="from same file") - print(msg) diff --git a/qadence_libs/types.py b/qadence_libs/types.py new file mode 100644 index 0000000..f5b91ca --- /dev/null +++ b/qadence_libs/types.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from qadence.types import StrEnum + +__all__ = ["BasisSet", "ReuploadScaling"] + + +class BasisSet(StrEnum): + """Basis set for feature maps.""" + + FOURIER = "Fourier" + """Fourier basis set.""" + CHEBYSHEV = "Chebyshev" + """Chebyshev polynomials of the first kind.""" + + +class ReuploadScaling(StrEnum): + """Scaling for data reuploads in feature maps.""" + + CONSTANT = "Constant" + """Constant scaling.""" + TOWER = "Tower" + """Linearly increasing scaling.""" + EXP = "Exponential" + """Exponentially increasing scaling.""" diff --git a/tests/constructors/test_ansatz.py b/tests/constructors/test_ansatz.py new file mode 100644 index 0000000..4be247b --- /dev/null +++ b/tests/constructors/test_ansatz.py @@ -0,0 +1,164 @@ +from __future__ import annotations + +import pytest +from qadence import ( + CNOT, + CRX, + RX, + RY, + RZ, + Interaction, + QuantumCircuit, + QuantumModel, + VariationalParameter, + Z, + chain, + hamiltonian_factory, + kron, + random_state, + run, +) +from qadence.blocks import AbstractBlock, has_duplicate_vparams +from qadence.types import 
Strategy +from torch import Size, allclose + +from qadence_libs.constructors import hea, identity_initialized_ansatz + + +@pytest.mark.parametrize("n_qubits", [2, 3]) +@pytest.mark.parametrize("depth", [2, 3]) +@pytest.mark.parametrize("entangler", [CNOT, CRX]) +def test_hea_duplicate_params(n_qubits: int, depth: int, entangler: AbstractBlock) -> None: + """Tests that HEAs are initialized with correct parameter namings.""" + hea1 = hea(n_qubits=n_qubits, depth=depth, operations=[RZ, RX, RZ], entangler=entangler) + hea2 = hea(n_qubits=n_qubits, depth=depth, operations=[RZ, RX, RZ], entangler=entangler) + block1 = chain(hea1, hea2) + assert has_duplicate_vparams(block1) + hea1 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + entangler=entangler, + param_prefix="0", + ) + hea2 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + entangler=entangler, + param_prefix="1", + ) + block2 = chain(hea1, hea2) + assert not has_duplicate_vparams(block2) + + +@pytest.mark.parametrize("n_qubits", [2, 3]) +@pytest.mark.parametrize("depth", [2, 3]) +@pytest.mark.parametrize("hamiltonian", ["fixed_global", "parametric_local"]) +def test_hea_sDAQC(n_qubits: int, depth: int, hamiltonian: str) -> None: + if hamiltonian == "fixed_global": + entangler = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + if hamiltonian == "parametric_local": + x = VariationalParameter("x") + entangler = x * kron(Z(0), Z(1)) + hea1 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + entangler=entangler, + strategy=Strategy.SDAQC, + ) + # Variational parameters in the digital-analog entangler + # are not created automatically by the hea function, but + # by passing them in the entangler. Thus for depth larger + # than 1 we do get duplicate vparams: + if hamiltonian == "fixed_global": + assert not has_duplicate_vparams(hea1) + if hamiltonian == "parametric_local": + assert has_duplicate_vparams(hea1) + + +@pytest.mark.parametrize("n_qubits", [2, 5]) +@pytest.mark.parametrize("depth", [2, 4]) +@pytest.mark.parametrize("strategy", [Strategy.DIGITAL, Strategy.SDAQC]) +def test_hea_forward(n_qubits: int, depth: int, strategy: Strategy) -> None: + hea1 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + strategy=strategy, + ) + circuit = QuantumCircuit(n_qubits, hea1) + model = QuantumModel(circuit) + + wf = model.run({}) + assert wf.shape == Size([1, 2**n_qubits]) + + +@pytest.mark.parametrize("n_qubits", [2, 3]) +@pytest.mark.parametrize("depth", [2, 3]) +@pytest.mark.parametrize("entangler", [CNOT, CRX]) +def test_iia_duplicate_params(n_qubits: int, depth: int, entangler: AbstractBlock) -> None: + """Tests that IIAs are initialized with correct parameter namings.""" + iia1 = identity_initialized_ansatz( + n_qubits=n_qubits, depth=depth, rotations=[RZ, RX, RZ], entangler=entangler + ) + iia2 = identity_initialized_ansatz( + n_qubits=n_qubits, depth=depth, rotations=[RZ, RX, RZ], entangler=entangler + ) + block = chain(iia1, iia2) + assert has_duplicate_vparams(block) + + +@pytest.mark.parametrize("n_qubits", [2, 5]) +@pytest.mark.parametrize("depth", [2, 4]) +@pytest.mark.parametrize("entangler", [CNOT, CRX]) +def test_iia_forward(n_qubits: int, depth: int, entangler: AbstractBlock) -> None: + iia = identity_initialized_ansatz( + n_qubits=n_qubits, depth=depth, rotations=[RZ, RX, RZ], entangler=entangler + ) + circuit = QuantumCircuit(n_qubits, iia) + model = QuantumModel(circuit) + + wf = model.run({}) + assert wf.shape == Size([1, 2**n_qubits]) 
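# A minimal composition sketch (illustrative, following the forward tests above and the
# docstring examples in the constructors): the new feature_map and hea constructors
# chain into a single trainable circuit. Assumes feature_map's default Fourier encoding
# and the feature parameter name "phi".
def test_feature_map_hea_composition_sketch() -> None:
    from torch import rand

    from qadence_libs.constructors import feature_map

    n_qubits = 3
    # data-encoding layer followed by a variational digital HEA
    block = chain(feature_map(n_qubits, param="phi"), hea(n_qubits, depth=2))
    circuit = QuantumCircuit(n_qubits, block)
    model = QuantumModel(circuit)
    wf = model.run({"phi": rand(1)})
    assert wf.shape == Size([1, 2**n_qubits])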
+ + +@pytest.mark.parametrize("n_qubits", [2, 5]) +@pytest.mark.parametrize("depth", [2, 4]) +@pytest.mark.parametrize("entangler", [CNOT, CRX]) +@pytest.mark.parametrize("ops", [[RX, RZ], [RX, RZ, RX]]) +def test_iia_value( + n_qubits: int, depth: int, entangler: AbstractBlock, ops: list[AbstractBlock] +) -> None: + iia = identity_initialized_ansatz( + n_qubits=n_qubits, depth=depth, rotations=ops, entangler=entangler + ) + state = random_state(n_qubits) + assert allclose(state, run(iia, state=state)) + + +@pytest.mark.parametrize("n_qubits", [2, 3]) +@pytest.mark.parametrize("depth", [2, 3]) +@pytest.mark.parametrize("hamiltonian", ["fixed_global", "parametric_local"]) +def test_iia_sDAQC(n_qubits: int, depth: int, hamiltonian: str) -> None: + if hamiltonian == "fixed_global": + entangler = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + if hamiltonian == "parametric_local": + x = VariationalParameter("x") + entangler = x * kron(Z(0), Z(1)) + iia = identity_initialized_ansatz( + n_qubits=n_qubits, + depth=depth, + strategy=Strategy.SDAQC, + rotations=[RX, RY], + entangler=entangler, + ) + # Variational parameters in the digital-analog entangler + # are not created automatically by the hea function, but + # by passing them in the entangler. Thus for depth larger + # than 1 we do get duplicate vparams: + if hamiltonian == "fixed_global": + assert not has_duplicate_vparams(iia) + if hamiltonian == "parametric_local": + assert has_duplicate_vparams(iia) diff --git a/tests/constructors/test_feature_maps.py b/tests/constructors/test_feature_maps.py new file mode 100644 index 0000000..d452dd4 --- /dev/null +++ b/tests/constructors/test_feature_maps.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +from typing import Callable + +import pytest +import sympy +import torch +from metrics import ATOL_64 +from qadence.execution import expectation, run +from qadence.operations import PHASE, RX, X, Z +from qadence.parameters import FeatureParameter +from qadence.types import PI, BasisSet, ReuploadScaling + +from qadence_libs.constructors import exp_fourier_feature_map, feature_map + +PARAM_DICT_0 = { + "support": None, + "param": FeatureParameter("x"), + "op": RX, + "feature_range": None, + "multiplier": None, +} + +PARAM_DICT_1 = { + "support": (3, 2, 1, 0), + "param": "x", + "op": PHASE, + "feature_range": (-2.0, -1.0), + "target_range": (1.0, 5.0), + "multiplier": FeatureParameter("y"), + "param_prefix": "w", +} + + +@pytest.mark.parametrize("param_dict", [PARAM_DICT_0, PARAM_DICT_1]) +@pytest.mark.parametrize( + "fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV, sympy.asin, lambda x: x**2] +) +@pytest.mark.parametrize( + "reupload_scaling", + [ + ReuploadScaling.CONSTANT, + ReuploadScaling.TOWER, + ReuploadScaling.EXP, + lambda i: 5 * i + 2, + ], +) +def test_feature_map_creation_and_run( + param_dict: dict, + fm_type: BasisSet | type[sympy.Function], + reupload_scaling: ReuploadScaling | Callable, +) -> None: + n_qubits = 4 + + block = feature_map( + n_qubits=n_qubits, fm_type=fm_type, reupload_scaling=reupload_scaling, **param_dict + ) + + values = {"x": torch.rand(1), "y": torch.rand(1)} + + run(block, values=values) + + +@pytest.mark.parametrize("n_qubits", [3, 4, 5]) +@pytest.mark.parametrize("fm_type", [BasisSet.FOURIER, BasisSet.CHEBYSHEV]) +@pytest.mark.parametrize( + "reupload_scaling", + [ReuploadScaling.TOWER, ReuploadScaling.CONSTANT, ReuploadScaling.EXP, "exp_down"], +) +def test_feature_map_correctness( + n_qubits: int, fm_type: BasisSet, reupload_scaling: 
ReuploadScaling +) -> None: + support = tuple(range(n_qubits)) + + # Preparing exact result + if fm_type == BasisSet.CHEBYSHEV: + xv = torch.linspace(-0.95, 0.95, 100) + transformed_xv = torch.acos(xv) + feature_range = (-1.0, 1.0) + target_range = (-1.0, 1.0) + elif fm_type == BasisSet.FOURIER: + xv = torch.linspace(0.0, 2 * PI, 100) + transformed_xv = xv + feature_range = (0.0, 2 * PI) + target_range = (0.0, 2 * PI) + + if reupload_scaling == ReuploadScaling.CONSTANT: + + def scaling(j: int) -> float: + return 1 + + elif reupload_scaling == ReuploadScaling.TOWER: + + def scaling(j: int) -> float: + return float(j + 1) + + elif reupload_scaling == ReuploadScaling.EXP: + + def scaling(j: int) -> float: + return float(2**j) + + elif reupload_scaling == "exp_down": + + def scaling(j: int) -> float: + return float(2 ** (n_qubits - j - 1)) + + reupload_scaling = ReuploadScaling.EXP + support = tuple(reversed(range(n_qubits))) + + target = torch.cat( + [torch.cos(scaling(j) * transformed_xv).unsqueeze(1) for j in range(n_qubits)], 1 + ) + + # Running the block expectation + block = feature_map( + n_qubits=n_qubits, + support=support, + param="x", + op=RX, + fm_type=fm_type, + reupload_scaling=reupload_scaling, + feature_range=feature_range, + target_range=target_range, + ) + + yv = expectation(block, [Z(j) for j in range(n_qubits)], values={"x": xv}) + + # Assert correctness + assert torch.allclose(yv, target, atol=ATOL_64) + + +@pytest.mark.parametrize("n_qubits", [3, 4, 5]) +def test_exp_fourier_feature_map_correctness(n_qubits: int) -> None: + block = exp_fourier_feature_map(n_qubits, param="x") + xv = torch.linspace(0.0, 2**n_qubits - 1, 100) + yv = expectation(block, [X(j) for j in range(n_qubits)], values={"x": xv}) + target = torch.cat( + [torch.cos(2 ** (j + 1) * PI * xv / 2**n_qubits).unsqueeze(1) for j in range(n_qubits)], + 1, + ) + assert torch.allclose(yv, target) diff --git a/tests/constructors/test_qft.py b/tests/constructors/test_qft.py new file mode 100644 index 0000000..8106403 --- /dev/null +++ b/tests/constructors/test_qft.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import pytest +from metrics import ATOL_64 +from qadence import ( + BackendName, + Interaction, + QuantumCircuit, + QuantumModel, + hamiltonian_factory, + random_state, +) +from qadence.states import equivalent_state +from qadence.types import PI, Strategy +from torch import Tensor, allclose, cdouble, exp, matmul, tensor, zeros + +from qadence_libs.constructors import qft + + +def test_qft() -> None: + def qft_matrix(N: int) -> Tensor: + """Textbook QFT unitary matrix to compare to the circuit solution.""" + matrix = zeros((N, N), dtype=cdouble) + w = exp(tensor(2.0j * PI / N, dtype=cdouble)) + for i in range(N): + for j in range(N): + matrix[i, j] = (N ** (-1 / 2)) * w ** (i * j) + return matrix + + n_qubits = 2 + + # First tests that the qft_matrix function is correct for 2-qubits + qft_m_2q = (1 / 2) * tensor( + [ + [1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j], + [1.0 + 0.0j, 0.0 + 1.0j, -1.0 + 0.0j, 0.0 - 1.0j], + [1.0 + 0.0j, -1.0 + 0.0j, 1.0 + 0.0j, -1.0 + 0.0j], + [1.0 + 0.0j, 0.0 - 1.0j, -1.0 + 0.0j, 0.0 + 1.0j], + ], + dtype=cdouble, + ) + + assert allclose(qft_m_2q, qft_matrix(n_qubits**2), rtol=0.0, atol=ATOL_64) + + # Now loads larger random initial state + n_qubits = 5 + + wf_init = random_state(n_qubits) + + # Runs QFT circuit with swaps to match standard QFT definition + qc_qft = QuantumCircuit(n_qubits, qft(n_qubits, swaps_out=True, strategy=Strategy.DIGITAL)) + model = 
+    wf_qft = model.run(values={}, state=wf_init)
+
+    # Checks output with the textbook matrix
+    wf_textbook = matmul(qft_matrix(2**n_qubits), wf_init[0])
+
+    assert equivalent_state(wf_qft, wf_textbook.unsqueeze(0), atol=10 * ATOL_64)
+
+
+def test_qft_inverse() -> None:
+    """Tests that applying qft -> inverse qft returns the initial state."""
+    n_qubits = 4
+    wf_init = random_state(n_qubits)
+    qc_qft = QuantumCircuit(n_qubits, qft(n_qubits))
+    qc_qft_inv = QuantumCircuit(n_qubits, qft(n_qubits, inverse=True))
+    model = QuantumModel(qc_qft, backend=BackendName.PYQTORCH)
+    model_inv = QuantumModel(qc_qft_inv, backend=BackendName.PYQTORCH)
+    wf_1 = model.run(values={}, state=wf_init)
+    wf_2 = model_inv.run(values={}, state=wf_1)
+    assert equivalent_state(wf_2, wf_init, atol=ATOL_64)
+
+
+@pytest.mark.parametrize(
+    "param_dict",
+    [
+        {"inverse": False, "reverse_in": False, "swaps_out": False},
+        {"inverse": True, "reverse_in": True, "swaps_out": True},
+    ],
+)
+@pytest.mark.parametrize("n_qubits", [1, 2, 5])
+@pytest.mark.parametrize("extra_qubit", [True, False])
+def test_qft_digital_analog(n_qubits: int, extra_qubit: bool, param_dict: dict) -> None:
+    """Tests that the digital and digital-analog qfts return the same result."""
+
+    circ_n_qubits = n_qubits + 1 if extra_qubit else n_qubits
+
+    qc_qft_digital = QuantumCircuit(
+        circ_n_qubits, qft(n_qubits, strategy=Strategy.DIGITAL, **param_dict)
+    )
+
+    qft_analog_block = hamiltonian_factory(
+        circ_n_qubits, interaction=Interaction.NN, random_strength=True
+    )
+
+    qc_qft_digital_analog = QuantumCircuit(
+        circ_n_qubits,
+        qft(n_qubits, strategy=Strategy.SDAQC, gen_build=qft_analog_block, **param_dict),
+    )
+    model_digital = QuantumModel(qc_qft_digital)
+    model_analog = QuantumModel(qc_qft_digital_analog)
+
+    wf_init = random_state(circ_n_qubits)
+    wf_digital = model_digital.run(values={}, state=wf_init)
+    wf_analog = model_analog.run(values={}, state=wf_init)
+
+    assert equivalent_state(wf_digital, wf_analog, atol=ATOL_64)
diff --git a/tests/constructors/test_rydberg_hea.py b/tests/constructors/test_rydberg_hea.py
new file mode 100644
index 0000000..06ad8c2
--- /dev/null
+++ b/tests/constructors/test_rydberg_hea.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+
+import pytest
+import torch
+from qadence.blocks import CompositeBlock
+from qadence.blocks.analog import ConstantAnalogRotation
+from qadence.circuit import QuantumCircuit
+from qadence.constructors import hamiltonian_factory, total_magnetization
+from qadence.models import QuantumModel
+from qadence.operations import AnalogRY, X
+from qadence.parameters import VariationalParameter
+from qadence.register import Register
+from qadence.types import PI, BasisSet
+
+from qadence_libs.constructors import (
+    analog_feature_map,
+    rydberg_feature_map,
+    rydberg_hea,
+    rydberg_tower_feature_map,
+)
+
+
+@pytest.mark.parametrize("detunings", [True, False])
+@pytest.mark.parametrize("drives", [True, False])
+@pytest.mark.parametrize("phase", [True, False])
+def test_rydberg_hea_construction(detunings: bool, drives: bool, phase: bool) -> None:
+    n_qubits = 4
+    n_layers = 2
+    register = Register.line(n_qubits)
+
+    ansatz = rydberg_hea(
+        register,
+        n_layers=n_layers,
+        addressable_detuning=detunings,
+        addressable_drive=drives,
+        tunable_phase=phase,
+    )
+    assert isinstance(ansatz, CompositeBlock)
+    assert len(ansatz.blocks) == n_layers
+
+    drive_layer = ansatz.blocks[0].blocks[0]  # type:ignore [attr-defined]
+    wait_layer = ansatz.blocks[0].blocks[1]  # type:ignore [attr-defined]
+    det_layer = ansatz.blocks[0].blocks[2]  # type:ignore [attr-defined]
+
+    ndrive_params = len(drive_layer.parameters.names())
+    ndet_params = len(det_layer.parameters.names())
+
+    assert ndet_params == (1 if not detunings else n_qubits + 1)
+    assert len(wait_layer.parameters.names()) == 1
+    if not phase:
+        # the +2 comes from the time evolution parameter and the scaling factor
+        assert ndrive_params == (1 if not drives else n_qubits + 2)
+    else:
+        assert ndrive_params == (4 if not drives else n_qubits + 4)
+
+
+def test_rydberg_hea_differentiation() -> None:
+    n_qubits = 4
+    n_layers = 2
+    register = Register.line(n_qubits)
+
+    ansatz = rydberg_hea(
+        register,
+        n_layers=n_layers,
+        addressable_detuning=True,
+        addressable_drive=True,
+        tunable_phase=True,
+    )
+
+    circuit = QuantumCircuit(n_qubits, ansatz)
+    observable = hamiltonian_factory(register, detuning=X)
+    model = QuantumModel(circuit, observable=observable)
+
+    expval = model.expectation({})
+    expval.backward()
+    for p in model.parameters():
+        if p.requires_grad:
+            assert p.grad is not None
+
+
+@pytest.mark.parametrize("basis", [BasisSet.FOURIER, BasisSet.CHEBYSHEV])
+def test_analog_feature_map(basis: BasisSet) -> None:
+    pname = "x"
+    mname = "mult"
+    fm = analog_feature_map(
+        param=pname, op=AnalogRY, fm_type=basis, multiplier=VariationalParameter(mname)
+    )
+    assert isinstance(fm, ConstantAnalogRotation)
+    assert fm.parameters.phase == -PI / 2
+    assert fm.parameters.delta == 0.0
+
+    params = list(fm.parameters.alpha.free_symbols)
+    assert len(params) == 2
+    assert pname in params and mname in params
+
+
+@pytest.mark.parametrize("weights", [None, [1.0, 2.0, 3.0, 4.0]])
+def test_rydberg_feature_map(weights: list[float] | None) -> None:
+    n_qubits = 4
+
+    fm = rydberg_feature_map(n_qubits, param="x", weights=weights)
+    assert len(fm) == n_qubits
+    assert all([isinstance(b, ConstantAnalogRotation) for b in fm.blocks])
+
+    circuit = QuantumCircuit(n_qubits, fm)
+    observable = total_magnetization(n_qubits)
+    model = QuantumModel(circuit, observable=observable)
+
+    values = {"x": torch.rand(1)}
+    expval = model.expectation(values)
+    expval.backward()
+    for p in model.parameters():
+        if p.requires_grad:
+            assert p.grad is not None
+
+
+def test_rydberg_tower_feature_map() -> None:
+    n_qubits = 4
+
+    fm1 = rydberg_tower_feature_map(n_qubits, param="x")
+    fm2 = rydberg_feature_map(n_qubits, param="x", weights=[1.0, 2.0, 3.0, 4.0])
+
+    for b1, b2 in zip(fm1.blocks, fm2.blocks):
+        assert b1.parameters.alpha == b2.parameters.alpha  # type:ignore [attr-defined]
diff --git a/tests/metrics.py b/tests/metrics.py
new file mode 100644
index 0000000..0d0a95c
--- /dev/null
+++ b/tests/metrics.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+from qadence.types import BackendName
+
+ATOL_64 = 1e-14  # 64 bit precision
+ATOL_32 = 1e-07  # 32 bit precision
+ATOL_E6 = 1e-06  # some tests do not pass ATOL_32; to fix
+LOW_ACCEPTANCE = 2.0e-2
+MIDDLE_ACCEPTANCE = 6.0e-2
+HIGH_ACCEPTANCE = 0.5
+JS_ACCEPTANCE = 7.5e-2
+PSR_ACCEPTANCE = 1e-5
+GPSR_ACCEPTANCE = 1e-1
+ADJOINT_ACCEPTANCE = ATOL_E6
+PULSER_GPSR_ACCEPTANCE = 6.0e-2
+ATOL_DICT = {
+    BackendName.PYQTORCH: ATOL_32,
+    BackendName.HORQRUX: ATOL_32,
+    BackendName.PULSER: 1e-02,
+    BackendName.BRAKET: 1e-02,
+}
+MAX_COUNT_DIFF = 20
+SMALL_SPACING = 7.0
+LARGE_SPACING = 30.0
+DIGITAL_DECOMP_ACCEPTANCE_HIGH = 1e-2
+DIGITAL_DECOMP_ACCEPTANCE_LOW = 1e-3
diff --git a/tests/test_main.py b/tests/test_main.py
deleted file mode 100644
index bcb6efe..0000000
--- a/tests/test_main.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from __future__ import annotations
-
-from qadence_libs.main import main
-
-expected_msg = "Pasqal template Python project"
-
-
-def test_main() -> None:
-    msg = main()
-    assert msg == expected_msg
-
-
-def test_main_with_str() -> None:
-    str_to_add = "with added str"
-    msg = main(str_to_add=str_to_add)
-    assert msg == expected_msg + str_to_add
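
The feature-map correctness test above leans on a simple identity: applying `RX(phi)` to `|0>` gives `<Z> = cos(phi)`, so with a Chebyshev feature map and tower reuploading, qubit `j` should read out `cos((j + 1) * acos(x))`. The snippet below is a minimal sketch of that check outside the test suite; it reuses only the `qadence` / `qadence_libs` calls exercised in the diff (`feature_map`, `expectation`, `Z`), while the qubit count, sample grid, and `1e-12` tolerance are arbitrary choices for illustration.

```python
# Illustrative sketch only: reproduces the identity checked by test_feature_map_correctness
# for the Chebyshev basis with tower reuploading.
import torch
from qadence.execution import expectation
from qadence.operations import RX, Z
from qadence.types import BasisSet, ReuploadScaling

from qadence_libs.constructors import feature_map

n_qubits = 3
xv = torch.linspace(-0.95, 0.95, 50)

# Chebyshev feature map with tower reuploading: qubit j encodes (j + 1) * acos(x)
block = feature_map(
    n_qubits=n_qubits,
    param="x",
    op=RX,
    fm_type=BasisSet.CHEBYSHEV,
    reupload_scaling=ReuploadScaling.TOWER,
)

# <Z_j> after RX(theta) on |0> equals cos(theta), so column j should be cos((j + 1) * acos(x))
yv = expectation(block, [Z(j) for j in range(n_qubits)], values={"x": xv})
target = torch.stack([torch.cos((j + 1) * torch.acos(xv)) for j in range(n_qubits)], dim=1)
assert torch.allclose(yv, target, atol=1e-12)
```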