diff --git a/.github/workflows/update-dev.yml b/.github/workflows/update-dev.yml index 55d89fa0db..5464f47380 100644 --- a/.github/workflows/update-dev.yml +++ b/.github/workflows/update-dev.yml @@ -27,12 +27,29 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 + with: + token: ${{ secrets.NIGHTLY_TOKEN }} + fetch-depth: 0 # We want entire git-history to avoid any merge conflicts - name: Nightly Merge - uses: robotology/gh-action-nightly-merge@v1.3.3 - with: - stable_branch: 'master' - development_branch: 'dev' - allow_ff: false env: - GITHUB_TOKEN: ${{ secrets.NIGHTLY_TOKEN }} + CONFIG_USERNAME: GitHub Nightly Merge Action + CONFIG_EMAIL: actions@github.com + MERGE_HEAD: master + MERGE_BASE: dev + MERGE_ARGS: --no-ff --allow-unrelated-histories --no-edit + run: | + # This script is adapted from the robotology/gh-action-nightly-merge@v1.5.2 GitHub action: + # https://github.com/robotology/gh-action-nightly-merge/blob/master/entrypoint.sh + + git config --global user.name "$CONFIG_USERNAME" + git config --global user.email "$CONFIG_EMAIL" + + git fetch origin $MERGE_HEAD + (git checkout $MERGE_HEAD && git pull origin $MERGE_HEAD) + + git fetch origin $MERGE_BASE + (git checkout $MERGE_BASE && git pull origin $MERGE_BASE) + + git merge $MERGE_ARGS $MERGE_HEAD + git push origin $MERGE_BASE diff --git a/demonstrations/quantum_volume.metadata.json b/demonstrations/quantum_volume.metadata.json index 5a6a18c13e..9e45b3ab88 100644 --- a/demonstrations/quantum_volume.metadata.json +++ b/demonstrations/quantum_volume.metadata.json @@ -2,11 +2,11 @@ "title": "Quantum volume", "authors": [ { - "id": "olivia_di_matteo" + "username": "glassnotes" } ], "dateOfPublication": "2020-12-15T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-11T00:00:00+00:00", "categories": [ "Quantum Hardware", "Quantum Computing" diff --git a/demonstrations/quantum_volume.py b/demonstrations/quantum_volume.py index 3f696645da..7dc9e43f48 100644 --- a/demonstrations/quantum_volume.py +++ b/demonstrations/quantum_volume.py @@ -888,8 +888,3 @@ def heavy_output_set(m, probs): # and Operating Systems (pp. 1001–1014) # (2019). `__ New York, NY, # USA: Association for Computing Machinery. -# -# -# About the author -# ---------------- -# .. 
include:: ../_static/authors/olivia_di_matteo.txt \ No newline at end of file diff --git a/demonstrations/tutorial_QUBO.metadata.json b/demonstrations/tutorial_QUBO.metadata.json index 2c6477d250..b2000eba3f 100644 --- a/demonstrations/tutorial_QUBO.metadata.json +++ b/demonstrations/tutorial_QUBO.metadata.json @@ -27,5 +27,6 @@ "basedOnPapers": [], "referencedByPapers": [], "relatedContent": [ - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/quadratic-unconstrained-binary-optimization-qubo-demo/7339" } diff --git a/demonstrations/tutorial_barren_plateaus.metadata.json b/demonstrations/tutorial_barren_plateaus.metadata.json index aeba0c1834..9226215d4c 100644 --- a/demonstrations/tutorial_barren_plateaus.metadata.json +++ b/demonstrations/tutorial_barren_plateaus.metadata.json @@ -2,11 +2,11 @@ "title": "Barren plateaus in quantum neural networks", "authors": [ { - "id": "shahnawaz_ahmed" + "username": "quantshah" } ], "dateOfPublication": "2019-10-11T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-15T00:00:00+00:00", "categories": [ "Optimization" ], diff --git a/demonstrations/tutorial_classical_shadows.metadata.json b/demonstrations/tutorial_classical_shadows.metadata.json index c25ea6792d..b18fce96ef 100644 --- a/demonstrations/tutorial_classical_shadows.metadata.json +++ b/demonstrations/tutorial_classical_shadows.metadata.json @@ -2,14 +2,14 @@ "title": "Classical shadows", "authors": [ { - "id": "roeland_wiersema" + "username": "therooler" }, { "id": "brian_doolittle" } ], "dateOfPublication": "2021-06-14T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-14T00:00:00+00:00", "categories": [ "Algorithms" ], @@ -69,5 +69,6 @@ "id": "tutorial_quantum_metrology", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/classical-shadows-in-calculating-fidelity/3727" } diff --git a/demonstrations/tutorial_data_reuploading_classifier.metadata.json b/demonstrations/tutorial_data_reuploading_classifier.metadata.json index bc8f17bc73..74a9b07dfc 100644 --- a/demonstrations/tutorial_data_reuploading_classifier.metadata.json +++ b/demonstrations/tutorial_data_reuploading_classifier.metadata.json @@ -2,11 +2,11 @@ "title": "Data-reuploading classifier", "authors": [ { - "id": "shahnawaz_ahmed" + "username": "quantshah" } ], "dateOfPublication": "2019-10-11T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-15T00:00:00+00:00", "categories": [ "Quantum Machine Learning" ], diff --git a/demonstrations/tutorial_diffable-mitigation.py b/demonstrations/tutorial_diffable-mitigation.py index bdd0029a33..fa37ae970b 100644 --- a/demonstrations/tutorial_diffable-mitigation.py +++ b/demonstrations/tutorial_diffable-mitigation.py @@ -42,7 +42,7 @@ Thus, we can improve the estimates of observables without breaking the differentiable workflow of our variational algorithm. We will briefly introduce these functionalities and afterwards go more in depth to explore what happens under the hood. 
-We start by initializing a noisy device under the :class:`~.pennylane.DepolarizingChannel`: +We start by initializing a noisy device using a noise model with :class:`~.pennylane.DepolarizingChannel` errors: """ import pennylane as qml @@ -54,13 +54,14 @@ n_wires = 4 np.random.seed(1234) -# Describe noise -noise_gate = qml.DepolarizingChannel -noise_strength = 0.05 +# Describe noise model +fcond = qml.noise.wires_in(range(n_wires)) +noise = qml.noise.partial_wires(qml.DepolarizingChannel, 0.05) +noise_model = qml.NoiseModel({fcond: noise}) # Load devices dev_ideal = qml.device("default.mixed", wires=n_wires) -dev_noisy = qml.transforms.insert(dev_ideal, noise_gate, noise_strength, position="all") +dev_noisy = qml.add_noise(dev_ideal, noise_model=noise_model) ############################################################################## # We are going to use the transverse field Ising model Hamiltonian :math:`H = - \sum_i X_i X_{i+1} + 0.5 \sum_i Z_i` as our observable: @@ -85,8 +86,9 @@ def qfunc(w1, w2): qml.SimplifiedTwoDesign(w1, w2, wires=range(n_wires)) return qml.expval(H) -qnode_noisy = qml.QNode(qfunc, dev_noisy) qnode_ideal = qml.QNode(qfunc, dev_ideal) +qnode_noisy = qml.QNode(qfunc, dev_noisy) +qnode_noisy = qml.transforms.decompose(qnode_noisy, gate_set = ["RY", "CZ"]) ############################################################################## # We can then simply transform the noisy QNode :math:`f^{⚡}` with :func:`~.pennylane.transforms.mitigate_with_zne` to generate :math:`\tilde{f}.` diff --git a/demonstrations/tutorial_eqnn_force_field.metadata.json b/demonstrations/tutorial_eqnn_force_field.metadata.json index f4d7cb8b0a..8a01021832 100644 --- a/demonstrations/tutorial_eqnn_force_field.metadata.json +++ b/demonstrations/tutorial_eqnn_force_field.metadata.json @@ -2,14 +2,14 @@ "title": "Symmetry-invariant quantum machine learning force fields", "authors": [ { - "id": "oriel_kiss" + "username": "orielkiss" }, { "id": "isabel_nha_minh_le" } ], "dateOfPublication": "2024-03-12T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-15T00:00:00+00:00", "categories": [ "Quantum Machine Learning", "Quantum Chemistry" diff --git a/demonstrations/tutorial_error_mitigation.py b/demonstrations/tutorial_error_mitigation.py index 07285cf664..e75f6f452f 100644 --- a/demonstrations/tutorial_error_mitigation.py +++ b/demonstrations/tutorial_error_mitigation.py @@ -49,31 +49,35 @@ Mitigating noise in a simple circuit ------------------------------------ -We first need a noisy device to execute our circuit on. Let's keep things simple for now by loading -the :mod:`default.mixed ` simulator and artificially adding -:class:`PhaseDamping ` noise. +We first need a noisy device to execute our circuit on. Let's keep things simple +for now by loading the :mod:`default.mixed ` simulator +and artificially adding :class:`PhaseDamping ` noise using a +:class:`NoiseModel `. 
""" import pennylane as qml n_wires = 4 -# Describe noise -noise_gate = qml.PhaseDamping -noise_strength = 0.1 +# Describe noise model +fcond = qml.noise.wires_in(range(n_wires)) +noise = qml.noise.partial_wires(qml.PhaseDamping, 0.1) +noise_model = qml.NoiseModel({fcond: noise}) # Load devices dev_ideal = qml.device("default.mixed", wires=n_wires) -dev_noisy = qml.transforms.insert(dev_ideal, noise_gate, noise_strength) +dev_noisy = qml.add_noise(dev_ideal, noise_model=noise_model) ############################################################################### -# In the above, we load a noise-free device ``dev_ideal`` and a noisy device ``dev_noisy``, which -# is constructed from the :func:`qml.transforms.insert ` transform. -# This transform works by intercepting each circuit executed on the device and adding the -# :class:`PhaseDamping ` noise channel directly after every gate in the -# circuit. To get a better understanding of noise channels like -# :class:`PhaseDamping `, check out the :doc:`tutorial_noisy_circuits` -# tutorial. +# In the above, we load a noise-free device ``dev_ideal`` and a noisy device ``dev_noisy``, +# which is constructed from the :func:`qml.add_noise ` +# transform. This transform works by intercepting each circuit executed on the device and +# adding the noise to it based on the ``noise_model``. For example, in this case, it will +# add :class:`PhaseDamping ` noise channel after every gate in the +# circuit acting on wires :math:`[0, 1, 2, 3]`. To get a better understanding of noise +# channels like :class:`PhaseDamping ` and using noise models, +# check out the :doc:`tutorial_noisy_circuits` and :doc:`tutorial_how_to_use_noise_models` +# tutorials, respectively. # # The next step is to define our circuit. Inspired by the mirror circuits concept introduced by # Proctor *et al.* [#proctor2020measuring]_ let's fix a circuit that applies a unitary :math:`U` @@ -112,6 +116,7 @@ def circuit(w1, w2): ideal_qnode = qml.QNode(circuit, dev_ideal) noisy_qnode = qml.QNode(circuit, dev_noisy) +noisy_qnode = qml.transforms.decompose(noisy_qnode, gate_set = ["RY", "CZ"]) ############################################################################## # First, we'll visualize the circuit: @@ -490,6 +495,7 @@ def qchem_circuit(phi): ideal_energy = qml.QNode(qchem_circuit, dev_ideal) noisy_energy = qml.QNode(qchem_circuit, dev_noisy) + noisy_energy = qml.transforms.decompose(noisy_energy, gate_set=["RX", "RY", "RZ", "CNOT"]) ideal_energies.append(ideal_energy(phi)) noisy_energies.append(noisy_energy(phi)) @@ -517,6 +523,7 @@ def qchem_circuit(phi): qml.DoubleExcitation(phi, wires=range(n_wires)), ] circuit = qml.tape.QuantumTape(ops) + [circuit], _ = qml.transforms.decompose(circuit, gate_set=["RX", "RY", "RZ", "CNOT"]) # Define custom executor that expands Hamiltonian measurement # into a linear combination of tensor products of Pauli diff --git a/demonstrations/tutorial_fermionic_operators.metadata.json b/demonstrations/tutorial_fermionic_operators.metadata.json index d23b2be902..6ac634f0ab 100644 --- a/demonstrations/tutorial_fermionic_operators.metadata.json +++ b/demonstrations/tutorial_fermionic_operators.metadata.json @@ -6,7 +6,7 @@ } ], "dateOfPublication": "2023-06-27T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-30T00:00:00+00:00", "categories": [ "Quantum Chemistry" ], diff --git a/demonstrations/tutorial_fermionic_operators.py b/demonstrations/tutorial_fermionic_operators.py index 1f420fcff7..b41b286cd1 
100644 --- a/demonstrations/tutorial_fermionic_operators.py +++ b/demonstrations/tutorial_fermionic_operators.py @@ -40,7 +40,7 @@ fermi_word = a0_dag * a1 fermi_sentence = 1.3 * a0_dag * a1 + 2.4 * a1 -fermi_sentence +print(fermi_sentence) ############################################################################## # In this simple example, we first created the operator :math:`a^{\dagger}_0 a_1` and then created @@ -48,7 +48,7 @@ # arithmetic operations between Fermi words and Fermi sentences. fermi_sentence = fermi_sentence * fermi_word + 2.3 * fermi_word -fermi_sentence +print(fermi_sentence) ############################################################################## # Beyond multiplication, summation, and subtraction, we can exponentiate fermionic operators in @@ -61,7 +61,7 @@ # in the same way that you would write down the operator on a piece of paper: fermi_sentence = 1.2 * a0_dag + 0.5 * a1 - 2.3 * (a0_dag * a1) ** 2 -fermi_sentence +print(fermi_sentence) ############################################################################## # This Fermi sentence can be mapped to the qubit basis using the diff --git a/demonstrations/tutorial_grovers_algorithm.metadata.json b/demonstrations/tutorial_grovers_algorithm.metadata.json index c4dab1bb37..e8feb2d44d 100644 --- a/demonstrations/tutorial_grovers_algorithm.metadata.json +++ b/demonstrations/tutorial_grovers_algorithm.metadata.json @@ -34,5 +34,6 @@ "id": "tutorial_qft_arithmetics", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/understanding-grover/3340" } diff --git a/demonstrations/tutorial_haar_measure.metadata.json b/demonstrations/tutorial_haar_measure.metadata.json index 97b58dacbf..eea684494c 100644 --- a/demonstrations/tutorial_haar_measure.metadata.json +++ b/demonstrations/tutorial_haar_measure.metadata.json @@ -2,11 +2,11 @@ "title": "Understanding the Haar measure", "authors": [ { - "id": "olivia_di_matteo" + "username": "glassnotes" } ], "dateOfPublication": "2021-03-22T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-11T00:00:00+00:00", "categories": [ "Quantum Machine Learning", "Quantum Computing" @@ -139,5 +139,6 @@ "id": "tutorial_barren_plateaus", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/understanding-the-haar-measure-demo/7334" } diff --git a/demonstrations/tutorial_haar_measure.py b/demonstrations/tutorial_haar_measure.py index 30856038e8..5807b5ab34 100644 --- a/demonstrations/tutorial_haar_measure.py +++ b/demonstrations/tutorial_haar_measure.py @@ -809,8 +809,4 @@ def qr_haar_random_unitary(): # Z. Holmes, K. Sharma, M. Cerezo, and P. J. Coles (2021) "Connecting ansatz # expressibility to gradient magnitudes and barren plateaus". (`arXiv # `__) -# -# -# About the author -# ---------------- -# .. 
include:: ../_static/authors/olivia_di_matteo.txt \ No newline at end of file +# \ No newline at end of file diff --git a/demonstrations/tutorial_implicit_diff_susceptibility.metadata.json b/demonstrations/tutorial_implicit_diff_susceptibility.metadata.json index a336b36d71..14a4c5e601 100644 --- a/demonstrations/tutorial_implicit_diff_susceptibility.metadata.json +++ b/demonstrations/tutorial_implicit_diff_susceptibility.metadata.json @@ -2,14 +2,14 @@ "title": "Implicit differentiation of variational quantum algorithms", "authors": [ { - "id": "shahnawaz_ahmed" + "username": "quantshah" }, { "id": "juan_felipe_carrasquilla_alvarez" } ], "dateOfPublication": "2022-11-28T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-15T00:00:00+00:00", "categories": [ "Optimization" ], diff --git a/demonstrations/tutorial_intro_qsvt.metadata.json b/demonstrations/tutorial_intro_qsvt.metadata.json index 80af77e7bc..2a2d3d128e 100644 --- a/demonstrations/tutorial_intro_qsvt.metadata.json +++ b/demonstrations/tutorial_intro_qsvt.metadata.json @@ -64,5 +64,6 @@ "id": "function_fitting_qsp", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/qsvt-algorithm-computing-the-angles-for-projector-controlled-phase-gate/3203" } diff --git a/demonstrations/tutorial_kernel_based_training.metadata.json b/demonstrations/tutorial_kernel_based_training.metadata.json index 9b3434187e..b0f2561717 100644 --- a/demonstrations/tutorial_kernel_based_training.metadata.json +++ b/demonstrations/tutorial_kernel_based_training.metadata.json @@ -28,5 +28,6 @@ "id": "tutorial_variational_classifier", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/kernel-based-training-demonstration/1017" } diff --git a/demonstrations/tutorial_lcu_blockencoding.metadata.json b/demonstrations/tutorial_lcu_blockencoding.metadata.json index d9f2d1c57a..a6f33b3e8e 100644 --- a/demonstrations/tutorial_lcu_blockencoding.metadata.json +++ b/demonstrations/tutorial_lcu_blockencoding.metadata.json @@ -55,5 +55,6 @@ "id": "tutorial_apply_qsvt", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/lcu-demo-comment/7316" } diff --git a/demonstrations/tutorial_optimal_control.metadata.json b/demonstrations/tutorial_optimal_control.metadata.json index 62597e4f5d..8670f733d2 100644 --- a/demonstrations/tutorial_optimal_control.metadata.json +++ b/demonstrations/tutorial_optimal_control.metadata.json @@ -6,7 +6,7 @@ } ], "dateOfPublication": "2023-08-08T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-30T00:00:00+00:00", "categories": [ "Optimization", "Quantum Computing", diff --git a/demonstrations/tutorial_optimal_control.py b/demonstrations/tutorial_optimal_control.py index fad987b41d..2d1ac0d8bb 100644 --- a/demonstrations/tutorial_optimal_control.py +++ b/demonstrations/tutorial_optimal_control.py @@ -504,7 +504,7 @@ def profit(params): # Initial parameters for the start and end times of the rectangles times = [jnp.linspace(eps, T - eps, P * 2) for op in ops_param] # All initial parameters: small alternating amplitudes and times -params = [jnp.hstack([[0.1 * (-1) ** i for i in range(P)], time]) for time in times] +params = [jnp.hstack([jnp.array([0.1 * (-1) ** i for i in range(P)]), time]) for time in times] ############################################################################# # Now we are all set up to train the parameters of the pulse 
sequence to produce @@ -679,7 +679,7 @@ def profit(params): # produced plot. times = [jnp.linspace(eps, T - eps, P * 2) for op in ops_param] -params = [jnp.hstack([[0.2 * (-1) ** i for i in range(P)], time]) for time in times] +params = [jnp.hstack([jnp.array([0.2 * (-1) ** i for i in range(P)]), time]) for time in times] num_steps = 1200 learning_rate = -2e-3 diff --git a/demonstrations/tutorial_photonics.metadata.json b/demonstrations/tutorial_photonics.metadata.json index 9c194ba650..56d9a92c12 100644 --- a/demonstrations/tutorial_photonics.metadata.json +++ b/demonstrations/tutorial_photonics.metadata.json @@ -137,5 +137,6 @@ "id": "gbs", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/photonic-quantum-computers-demo/7335" } diff --git a/demonstrations/tutorial_post-variational_quantum_neural_networks.py b/demonstrations/tutorial_post-variational_quantum_neural_networks.py index 9a5cdd50d7..0d72abcda0 100644 --- a/demonstrations/tutorial_post-variational_quantum_neural_networks.py +++ b/demonstrations/tutorial_post-variational_quantum_neural_networks.py @@ -4,20 +4,20 @@ """ ###################################################################### -# You're sitting in front of your quantum computer, excitement buzzing through your veins as your -# carefully crafted Ansatz for a variational algorithm is finally ready. But oh buttersticks — -# after a few hundred iterations, your heart sinks as you realize you have encountered the dreaded barren plateau problem, where -# gradients vanish and optimisation grinds to a halt. What now? Panic sets in, but then you remember the new technique -# you read about. You reach into your toolbox and pull out the *post-variational strategy*. This approach shifts -# parameters from the quantum computer to classical computers, ensuring the convergence to a local minimum. By combining -# fixed quantum circuits with a classical neural network, you can enhance trainability and keep your +# You're sitting in front of your quantum computer, excitement buzzing through your veins as your +# carefully crafted Ansatz for a variational algorithm is finally ready. But oh buttersticks — +# after a few hundred iterations, your heart sinks as you realize you have encountered the dreaded barren plateau problem, where +# gradients vanish and optimisation grinds to a halt. What now? Panic sets in, but then you remember the new technique +# you read about. You reach into your toolbox and pull out the *post-variational strategy*. This approach shifts +# parameters from the quantum computer to classical computers, ensuring the convergence to a local minimum. By combining +# fixed quantum circuits with a classical neural network, you can enhance trainability and keep your # research on track. -# +# # This tutorial introduces post-variational quantum neural networks with example code from PennyLane and JAX. -# We build variational and post-variational networks through a step-by-step process, and compare their +# We build variational and post-variational networks through a step-by-step process, and compare their # performance on the `digits dataset `__. # -# +# ###################################################################### # Enter post-variational strategies @@ -28,7 +28,7 @@ # transformed by an ansatz :math:`U(\theta)`, and the parameters :math:`\theta` are optimized by # evaluating gradients of the quantum circuit [#schuld2019evaluating]_ and calculating updates of the parameter on a classical # computer. 
:doc:`Variational algorithms ` are a prerequisite to this article. -# +# # However, many ansätze in the variational strategy face the barren plateau problem [#mcclean2018barren]_, which leads to difficulty in convergence # using :doc:`gradient-based ` optimization techniques. Due to the general difficulty and lack of training gurantees # of variational algorithms, here we will develop an alternative training strategy that does not involve tuning @@ -37,10 +37,10 @@ ###################################################################### # |image1| -# +# # .. |image1| image:: ../_static/demonstration_assets/post-variational_quantum_neural_networks/PVdrawing.jpeg # :width: 100.0% -# +# ###################################################################### # In this Demo we will also discuss “post-variational strategies” proposed in Ref. [#huang2024postvariational]_. We take the classical combination of @@ -50,38 +50,38 @@ # expressibility [#du2020expressive]_ of the circuit for better trainability of the entire model. Below, we discuss various # strategies and design principles for constructing individual quantum circuits, where the resulting # ensembles can be optimized with classical optimisation methods. -# +# ###################################################################### # We compare the post-variational strategies to the conventional variational :doc:`quantum neural network ` in the # table below. -# +# ###################################################################### # |image2| -# +# # .. |image2| image:: ../_static/demonstration_assets/post-variational_quantum_neural_networks/table.png # :width: 100.0% -# +# ###################################################################### # This example demonstrates how to employ the post-variational quantum neural network on the classical # machine learning task of image classification. In this demo we will solve the problem of identifying handwritten # digits of twos and sixes and obtain training performance better than that of variational # algorithms. This dataset is chosen such that the differences between the variational and post-variational approach -# are shown, but we note that the performances may vary for different datasets. -# +# are shown, but we note that the performances may vary for different datasets. +# ###################################################################### # The learning problem # -------------------- -# +# ###################################################################### # We will begin by training our models on the digits dataset, which we import using `sklearn`. The dataset has greyscale # images the size of :math:`8\times 8` pixels. We partition :math:`10\%` of the dataset for # testing. -# +# import pennylane as qml from pennylane import numpy as np @@ -97,7 +97,7 @@ import matplotlib.colors import warnings warnings.filterwarnings("ignore") -np.random.seed(42) +np.random.seed(42) # Load the digits dataset with features (X_digits) and labels (y_digits) X_digits, y_digits = load_digits(return_X_y=True) @@ -127,12 +127,13 @@ ###################################################################### # A visualization of a few data points is shown below. 
-# +# -fig, axes = plt.subplots(nrows=1, ncols=5, layout="constrained") -for i in range(5): - axes[i].matshow(X_train[2*i]) - axes[i].axis('off') +fig, axes = plt.subplots(nrows=2, ncols=3, layout="constrained") +for i in range(2): + for j in range(3): + axes[i][j].matshow(X_train[2*(2*j+i)]) + axes[i][j].axis('off') fig.subplots_adjust(hspace=0.0) fig.tight_layout() plt.show() @@ -140,49 +141,49 @@ ###################################################################### # Setting up the model # -------------------- -# +# # Here, we will create a simple quantum machine learning (QML) model for optimization. In particular: -# +# # - We will embed our data through a series of rotation gates, this is called the feature map. # - We will then have an ansatz of rotation gates with parameters' weights. -# +# ###################################################################### # For the feature map, each column of the image is encoded into a single qubit, and each row is # encoded consecutively via alternating rotation-Z and rotation-X gates. The circuit for our feature # map is shown below. -# +# ###################################################################### # |image3| -# +# # .. |image3| image:: ../_static/demonstration_assets/post-variational_quantum_neural_networks/featuremap.png # :width: 100.0% -# +# ###################################################################### # We use the following circuit as our ansatz. This ansatz is also used as backbone for all our # post-variational strategies. Note that when we set all initial parameters to 0, the ansatz evaluates to -# identity. -# +# identity. +# ###################################################################### # |image4| -# +# # .. |image4| image:: ../_static/demonstration_assets/post-variational_quantum_neural_networks/ansatz.png # :width: 100.0% -# +# ###################################################################### # We write code for the above ansatz and feature map as follows. -# +# def feature_map(features): # Apply Hadamard gates to all qubits to create an equal superposition state for i in range(len(features[0])): qml.Hadamard(i) - + # Apply angle embeddings based on the feature values for i in range(len(features)): # For odd-indexed features, use Z-rotation in the angle embedding @@ -197,15 +198,15 @@ def ansatz(params): # Apply RY rotations with the first set of parameters for i in range(8): qml.RY(params[i], wires=i) - + # Apply CNOT gates with adjacent qubits (cyclically connected) to create entanglement for i in range(8): qml.CNOT(wires=[(i - 1) % 8, (i) % 8]) - + # Apply RY rotations with the second set of parameters for i in range(8): qml.RY(params[i + 8], wires=i) - + # Apply CNOT gates with qubits in reverse order (cyclically connected) # to create additional entanglement for i in range(8): @@ -213,13 +214,13 @@ def ansatz(params): ###################################################################### # Variational approach # --------------------- -# +# ###################################################################### # As a baseline comparison, we first test the performance of a shallow variational algorithm on the # digits dataset shown above. We will build the quantum node by combining the above feature map and # ansatz. 
-# +# dev = qml.device("default.qubit", wires=8) @@ -295,11 +296,11 @@ def optimization_jit(params, data, targets): ###################################################################### # In this example, the variational algorithm is having trouble finding a global minimum (and this -# problem persists even if we do hyperparameter tuning). On the other hand, given the general applicability -# and consequent hardness of finding suitable ansätze, we introduce three heursitical methods for building -# the set of quantum circuits that make up post-variational quantum neural networks, namely the observable +# problem persists even if we do hyperparameter tuning). On the other hand, given the general applicability +# and consequent hardness of finding suitable ansätze, we introduce three heuristic methods for building +# the set of quantum circuits that make up post-variational quantum neural networks, namely the observable # construction heuristic, the ansatz expansion heuristic, and a hybrid of the two. -# +# ###################################################################### # Observable construction heuristic @@ -307,17 +308,17 @@ ###################################################################### # The observable construction heuristic removes the use of ansätze in the quantum and constructs measurements -# directly on the quantum data embedded state. +# directly on the quantum data embedded state. # For simplicity, we measure the data embedded state on different combinations of Pauli observables in this # Demo. We first define a series of :math:`k`-local trial observables # :math:`O_1, O_2, \ldots , O_m`. After computing the quantum circuits, the measurement results are # then combined classically, where the optimal weights of each measurement are computed via feeding our # measurements through a classical multilayer perceptron. -# +# ###################################################################### # We generate the series of :math:`k`-local observables with the following code. -# +# def local_pauli_group(qubits: int, locality: int): assert locality <= qubits, f"Locality must not exceed the number of qubits." @@ -331,7 +332,7 @@ def generate_paulis(identities: int, paulis: int, output: str, qubits: int, loca else: # Recursive case: add an "I" (identity) to the output string. yield from generate_paulis(identities + 1, paulis, output + "I", qubits, locality) - + # If the number of Pauli operators used is less than the locality, add "X", "Y", or "Z" # systematically builds all possible Pauli strings that conform to the specified locality. if paulis < locality: @@ -344,7 +345,7 @@ def generate_paulis(identities: int, paulis: int, output: str, qubits: int, loca # For each image sample, we measure the output of the quantum circuit using the :math:`k`-local observables # sequence, and perform logistic regression on these outputs. We do this for 1-local, 2-local and # 3-local observables in the `for`-loop below. -# +# # Initialize lists to store training and testing accuracies for different localities. train_accuracies_O = [] @@ -352,7 +353,7 @@ def generate_paulis(identities: int, paulis: int, output: str, qubits: int, loca for locality in range(1, 4): print(str(locality) + "-local: ") - + # Define a quantum device with 8 qubits using the default simulator. 
dev = qml.device("default.qubit", wires=8) @@ -361,34 +362,34 @@ def generate_paulis(identities: int, paulis: int, output: str, qubits: int, loca def circuit(features): # Generate all possible Pauli strings for the given locality. measurements = local_pauli_group(8, locality) - + # Apply the feature map to encode classical data into quantum states. feature_map(features) - + # Measure the expectation values of the generated Pauli operators. return [qml.expval(qml.pauli.string_to_pauli_word(measurement)) for measurement in measurements] # Vectorize the quantum circuit function to apply it to multiple data points in parallel. vcircuit = jax.vmap(circuit) - + # Transform the training and testing datasets by applying the quantum circuit. new_X_train = np.asarray(vcircuit(jnp.array(X_train))).T new_X_test = np.asarray(vcircuit(jnp.array(X_test))).T - + # Train a Multilayer Perceptron (MLP) classifier on the transformed training data. clf = MLPClassifier(early_stopping=True).fit(new_X_train, y_train) - + # Print the log loss for the training data. print("Training loss: ", log_loss(y_train, clf.predict_proba(new_X_train))) - + # Print the log loss for the testing data. print("Testing loss: ", log_loss(y_test, clf.predict_proba(new_X_test))) - + # Calculate and store the training accuracy. acc = clf.score(new_X_train, y_train) train_accuracies_O.append(acc) print("Training accuracy: ", acc) - + # Calculate and store the testing accuracy. acc = clf.score(new_X_test, y_test) test_accuracies_O.append(acc) @@ -404,9 +405,9 @@ def circuit(features): # Create a bar plot to visualize the training and testing accuracies. fig, ax = plt.subplots(layout="constrained") # Training accuracy bars: -rects = ax.bar(x, train_accuracies_O, width, label="Training", color="#FF87EB") +rects = ax.bar(x, train_accuracies_O, width, label="Training", color="#FF87EB") # Testing accuracy bars: -rects = ax.bar(x + width, test_accuracies_O, width, label="Testing", color="#70CEFF") +rects = ax.bar(x + width, test_accuracies_O, width, label="Testing", color="#70CEFF") ax.bar_label(rects, padding=3) ax.set_xlabel("Locality") ax.set_ylabel("Accuracy") @@ -420,29 +421,29 @@ def circuit(features): # We can see that the highest accuracy is achieved with the 3-local observables, which gives the # classical model the most information about the outputs of the circuit. However, this is much # more computationally resource heavy than its lower-locality counterparts. Note, however, that the -# complexity of the observable construction method for local observables can be vastly decreased by +# complexity of the observable construction method for local observables can be vastly decreased by # introducing the usage of classical shadows. [#huang2020predicting]_ -# +# ###################################################################### # Ansatz expansion heuristic # --------------------- -# +# ###################################################################### # The ansatz expansion approach does model approximation by directly expanding the parameterised # ansatz into an ensemble of fixed ansätze. Starting from a variational ansatz, multiple # non-parameterized quantum circuits are constructed by Taylor expansion of the ansatz around a # suitably chosen initial setting of the parameters :math:`\theta_0`, which we set here as 0. Gradients and higher-order -# derivatives of circuits then can be obtained by the :doc:`parameter-shift rule `. 
-The output sof the different circuits are then fed +# derivatives of circuits then can be obtained by the :doc:`parameter-shift rule `. +# The outputs of the different circuits are then fed # into a classical neural network. -# +# ###################################################################### # The following code is used to generate a series of fixed parameters that can be encoded into the # ansatz, using the above method. -# +# def deriv_params(thetas: int, order: int): # This function generates parameter shift values for calculating derivatives @@ -453,11 +454,11 @@ def deriv_params(thetas: int, order: int): def generate_shifts(thetas: int, order: int): # Generate all possible combinations of parameters to shift for the given order. shift_pos = list(combinations(np.arange(thetas), order)) - + # Initialize a 3D array to hold the shift values. # Shape: (number of combinations, 2^order, thetas) params = np.zeros((len(shift_pos), 2 ** order, thetas)) - + # Iterate over each combination of parameter shifts. for i in range(len(shift_pos)): # Iterate over each possible binary shift pattern for the given order. @@ -471,30 +472,30 @@ def generate_shifts(thetas: int, order: int): else: # If the bit is 0, decrement the corresponding parameter. params[i][j][shift_pos[i][k]] -= 1 - + # Reshape the parameters array to collapse the first two dimensions. params = np.reshape(params, (-1, thetas)) return params # Start with a list containing a zero-shift array for all parameters. param_list = [np.zeros((1, thetas))] - + # Append the generated shift values for each order from 1 to the given order. for i in range(1, order + 1): param_list.append(generate_shifts(thetas, i)) - + # Concatenate all the shift arrays along the first axis to create the final parameter array. params = np.concatenate(param_list, axis=0) - + # Scale the shift values by π/2. params *= np.pi / 2 - + return params ###################################################################### # We construct the circuit and measure the top qubit with Pauli-Z. -# +# n_wires = 8 dev = qml.device("default.qubit", wires=n_wires) @@ -509,7 +510,7 @@ def circuit(features, params, n_wires=8): ###################################################################### # For each image sample, we measure the outputs of each parameterised circuit for each feature, and # feed the outputs into a multilayer perceptron. -# +# # Initialize lists to store training and testing accuracies for different derivative orders. train_accuracies_AE = [] @@ -518,7 +519,7 @@ def circuit(features, params, n_wires=8): # Loop through different derivative orders (1st order, 2nd order, 3rd order). for order in range(1, 4): print("Order number: " + str(order)) - + # Generate the parameter shifts required for the given derivative order. to_measure = deriv_params(16, order) @@ -528,27 +529,27 @@ def circuit(features, params, n_wires=8): for thing in X_train: result = circuit(thing, to_measure.T) new_X_train.append(result) - + # Transform the testing dataset similarly. new_X_test = [] for thing in X_test: result = circuit(thing, to_measure.T) new_X_test.append(result) - + # Train a Multilayer Perceptron (MLP) classifier on the transformed training data. clf = MLPClassifier(early_stopping=True).fit(new_X_train, y_train) - + # Print the log loss for the training data. print("Training loss: ", log_loss(y_train, clf.predict_proba(new_X_train))) - + # Print the log loss for the testing data. 
print("Testing loss: ", log_loss(y_test, clf.predict_proba(new_X_test))) - + # Calculate and store the training accuracy. acc = clf.score(new_X_train, y_train) train_accuracies_AE.append(acc) print("Training accuracy: ", acc) - + # Calculate and store the testing accuracy. acc = clf.score(new_X_test, y_test) test_accuracies_AE.append(acc) @@ -573,14 +574,14 @@ def circuit(features, params, n_wires=8): plt.show() ###################################################################### -# Note that similar to the obsewrvable construction method, higher orders give higher testing accuracy. +# Note that similar to the obsewrvable construction method, higher orders give higher testing accuracy. # However, it is similarly more computationally expensive to execute. -# +# ###################################################################### # Hybrid strategy # --------------------- -# +# ###################################################################### # When taking the strategy of observable construction, one additionally may want to use ansatz @@ -588,14 +589,14 @@ def circuit(features, params, n_wires=8): # strategy that combines both the usage of ansatz expansion and observable construction. For each # feature, we may first expand the ansatz with each of our parameters, then use each :math:`k`-local # observable to conduct measurements. -# +# # Due to the high number of circuits that need to be computed with this strategy, one may choose to # further prune the circuits used in training, but this is not conducted in this demo. -# +# # Note that in our example, we have only tested 3 hybrid samples to reduce the running time of this # script, but one may choose to try other combinations of the 2 strategies to potentially obtain # better results. -# +# # Initialize matrices to store training and testing accuracies for different # combinations of locality and order. @@ -669,7 +670,7 @@ def circuit(features, params): # Upon obtaining our hybrid results, we may now combine these results with that of the observable # construction and ansatz expansion menthods, and plot all the post-variational strategies together on # a heatmap. -# +# for locality in range(1, 4): train_accuracies[locality][0] = train_accuracies_O[locality - 1] @@ -728,39 +729,39 @@ def circuit(features, params): ###################################################################### # Experimental results # -------------------- -# +# ###################################################################### # This demonstration shows that all used hybrid methods exceed the variational algorithm while using the same -# ansatz for the ansatz expansion and hybrid strategies. However, we do not expect all post-variational methods to outperform variational algorithm. -# For example, the ansatz expansion up to the first order is likely to be worse than the variational approach, as it is merely a one-step gradient update. -# -# From these performance results, we can obtain a glimpse of the effectiveness of each strategy. +# ansatz for the ansatz expansion and hybrid strategies. However, we do not expect all post-variational methods to outperform variational algorithm. +# For example, the ansatz expansion up to the first order is likely to be worse than the variational approach, as it is merely a one-step gradient update. +# +# From these performance results, we can obtain a glimpse of the effectiveness of each strategy. 
# The inclusion of 1-local and 2-local observables provides a boost in accuracy when used # in conjunction with first-order derivatives in the hybrid strategy. This implies that the addition # of the observable expansion strategy can serve as a heuristic to expand the expressibility to # ansatz expansion method, which in itself may not be sufficient as a good training strategy. -# +# ###################################################################### # Conclusion # --------------------- -# +# ###################################################################### # This tutorial demonstrates post-variational quantum neural networks [#huang2024postvariational]_, # an alternative implementation of quantum neural networks in the NISQ setting. # In this tutorial, we have implemented the post variational strategies to classify handwritten digits # of twos and sixes. -# +# # Given a well-selected set of good fixed ansätze, the post-variational method involves training classical # neural networks, to which we can employ techniques to ensure good trainability. While this property of -# post-variational methods provides well-optimised result based on the set of ansätze given, +# post-variational methods provides well-optimised result based on the set of ansätze given, # the barren plateau problems or the related exponential concentration are not directly resolved. The hardness of the problem is # instead delegated to the selection of the set of fixed ansätze from an exponential amount of # possible quantum circuits, which one can find using the three heuristical strategies introduced in this tutorial. # -# +# ###################################################################### # @@ -769,44 +770,44 @@ def circuit(features, params): # # .. [#cerezo2021variational] # -# M. Cerezo, A. Arrasmith, R. Babbush, S. C. Benjamin, S. Endo, K. Fujii, -# J. R. McClean, K. Mitarai, X. Yuan, L. Cincio, and P. J. Coles, -# Variational quantum algorithms, +# M. Cerezo, A. Arrasmith, R. Babbush, S. C. Benjamin, S. Endo, K. Fujii, +# J. R. McClean, K. Mitarai, X. Yuan, L. Cincio, and P. J. Coles, +# Variational quantum algorithms, # `Nat. Rev. Phys. 3, 625, (2021) `__. # # # .. [#schuld2019evaluating] # -# M. Schuld, V. Bergholm, C. Gogolin, J. Izaac, and N. Killoran, +# M. Schuld, V. Bergholm, C. Gogolin, J. Izaac, and N. Killoran, # Evaluating analytic gradients on quantum hardware, -# `Phys. Rev. A. 99, 032331, (2019) `__. -# +# `Phys. Rev. A. 99, 032331, (2019) `__. +# # # .. [#mcclean2018barren] # # J. R. McClean, S. Boixo, V. N. Smelyanskiy, R. Babbush, and H. Neven, -# Barren plateaus in quantum neural network training landscapes, +# Barren plateaus in quantum neural network training landscapes, # `Nat. Commun. 9, 4812, (2018) `__. # # # .. [#huang2024postvariational] # -# P.-W. Huang and P. Rebentrost, -# Post-variational quantum neural networks (2024), +# P.-W. Huang and P. Rebentrost, +# Post-variational quantum neural networks (2024), # `arXiv:2307.10560 [quant-ph] `__. # # # .. [#du2020expressive] # -# Y. Du, M.-H. Hsieh, T. Liu, and D. Tao, -# Expressive power of parametrized quantum circuits, +# Y. Du, M.-H. Hsieh, T. Liu, and D. Tao, +# Expressive power of parametrized quantum circuits, # `Phys. Rev. Res. 2, 033125 (2020) `__. # # # .. [#huang2020predicting] # -# H.-Y. Huang, R. Kueng, and J. Preskill, -# Predicting many properties of a quantum system from very few measurements, +# H.-Y. Huang, R. Kueng, and J. 
Preskill, +# Predicting many properties of a quantum system from very few measurements, # `Nat. Phys. 16, 1050–1057 (2020) `__. # # diff --git a/demonstrations/tutorial_qaoa_intro.metadata.json b/demonstrations/tutorial_qaoa_intro.metadata.json index f362d99d5f..60c7c87e09 100644 --- a/demonstrations/tutorial_qaoa_intro.metadata.json +++ b/demonstrations/tutorial_qaoa_intro.metadata.json @@ -28,5 +28,6 @@ "id": "tutorial_qaoa_maxcut", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/qaoa-and-optimization/5188" } diff --git a/demonstrations/tutorial_qaoa_maxcut.metadata.json b/demonstrations/tutorial_qaoa_maxcut.metadata.json index cbb2bc6bcf..3d520b1b25 100644 --- a/demonstrations/tutorial_qaoa_maxcut.metadata.json +++ b/demonstrations/tutorial_qaoa_maxcut.metadata.json @@ -30,5 +30,6 @@ "id": "tutorial_qaoa_intro", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/qaoa-and-optimization/5188" } diff --git a/demonstrations/tutorial_qpe.metadata.json b/demonstrations/tutorial_qpe.metadata.json index 512fac8cab..5e385acff9 100644 --- a/demonstrations/tutorial_qpe.metadata.json +++ b/demonstrations/tutorial_qpe.metadata.json @@ -55,5 +55,6 @@ "id": "tutorial_phase_kickback", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/introduction-to-quantum-phase-estimation-demo/7337" } diff --git a/demonstrations/tutorial_quantum_circuit_cutting.metadata.json b/demonstrations/tutorial_quantum_circuit_cutting.metadata.json index 26b07e3392..fd342ce51d 100644 --- a/demonstrations/tutorial_quantum_circuit_cutting.metadata.json +++ b/demonstrations/tutorial_quantum_circuit_cutting.metadata.json @@ -2,17 +2,17 @@ "title": "Quantum Circuit Cutting", "authors": [ { - "id": "gideon_uchehara" + "username": "gideonuchehara" }, { - "id": "matija_medvidovic" + "username": "Matematija" }, { "id": "anuj_apte" } ], "dateOfPublication": "2022-09-02T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-18T00:00:00+00:00", "categories": [ "Algorithms", "Quantum Computing" ], diff --git a/demonstrations/tutorial_quantum_gans.metadata.json b/demonstrations/tutorial_quantum_gans.metadata.json index ad8e932038..4e5287208f 100644 --- a/demonstrations/tutorial_quantum_gans.metadata.json +++ b/demonstrations/tutorial_quantum_gans.metadata.json @@ -47,5 +47,6 @@ "id": "tutorial_QGAN", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/questions-on-qgan-tutorial/2607" } diff --git a/demonstrations/tutorial_quantum_transfer_learning.metadata.json b/demonstrations/tutorial_quantum_transfer_learning.metadata.json index 0cc03cce27..91708c0cb6 100644 --- a/demonstrations/tutorial_quantum_transfer_learning.metadata.json +++ b/demonstrations/tutorial_quantum_transfer_learning.metadata.json @@ -59,5 +59,6 @@ ], "basedOnPapers": [], "referencedByPapers": [], - "relatedContent": [] + "relatedContent": [], + "discussionForumUrl": "https://discuss.pennylane.ai/t/can-quantum-transfer-learning-be-used-for-multi-classification/3963" } diff --git a/demonstrations/tutorial_quanvolution.metadata.json b/demonstrations/tutorial_quanvolution.metadata.json index b24840ac75..974be53cc6 100644 --- a/demonstrations/tutorial_quanvolution.metadata.json +++ b/demonstrations/tutorial_quanvolution.metadata.json @@ -32,5 +32,6 @@ ], "basedOnPapers": ["10.48550/arXiv.1904.04767"], "referencedByPapers": [], - "relatedContent": [] + "relatedContent": [], + "discussionForumUrl": 
"https://discuss.pennylane.ai/t/quanvolutional-neural-networks/7296" } diff --git a/demonstrations/tutorial_qubit_rotation.metadata.json b/demonstrations/tutorial_qubit_rotation.metadata.json index a8973dad85..4c20e39c43 100644 --- a/demonstrations/tutorial_qubit_rotation.metadata.json +++ b/demonstrations/tutorial_qubit_rotation.metadata.json @@ -39,5 +39,6 @@ "weight": 1.0 } ], - "hardware": [] + "hardware": [], + "discussionForumUrl": "https://discuss.pennylane.ai/t/basic-tutorial-qubit-rotation/7338" } diff --git a/demonstrations/tutorial_sc_qubits.metadata.json b/demonstrations/tutorial_sc_qubits.metadata.json index 5dd68bb0ca..43da743cf9 100644 --- a/demonstrations/tutorial_sc_qubits.metadata.json +++ b/demonstrations/tutorial_sc_qubits.metadata.json @@ -112,5 +112,6 @@ "id": "tutorial_photonics", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/quantum-computing-with-superconducting-qubits-demo/7336" } diff --git a/demonstrations/tutorial_tn_circuits.metadata.json b/demonstrations/tutorial_tn_circuits.metadata.json index 957950bb1c..bbdb369786 100644 --- a/demonstrations/tutorial_tn_circuits.metadata.json +++ b/demonstrations/tutorial_tn_circuits.metadata.json @@ -56,5 +56,6 @@ "id": "tutorial_variational_classifier", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/tensor-network-quantum-circuits-demo/7340" } diff --git a/demonstrations/tutorial_trapped_ions.metadata.json b/demonstrations/tutorial_trapped_ions.metadata.json index 735d188455..567d9488d8 100644 --- a/demonstrations/tutorial_trapped_ions.metadata.json +++ b/demonstrations/tutorial_trapped_ions.metadata.json @@ -177,5 +177,6 @@ "id": "tutorial_photonics", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/trapped-ion-quantum-computers-demo/7341" } diff --git a/demonstrations/tutorial_unitary_designs.metadata.json b/demonstrations/tutorial_unitary_designs.metadata.json index d97b3337e7..927e0b2694 100644 --- a/demonstrations/tutorial_unitary_designs.metadata.json +++ b/demonstrations/tutorial_unitary_designs.metadata.json @@ -2,11 +2,11 @@ "title": "Unitary designs", "authors": [ { - "id": "olivia_di_matteo" + "username": "glassnotes" } ], "dateOfPublication": "2021-09-07T00:00:00+00:00", - "dateOfLastModification": "2024-10-07T00:00:00+00:00", + "dateOfLastModification": "2024-10-11T00:00:00+00:00", "categories": [ "Quantum Machine Learning", "Quantum Computing" ], diff --git a/demonstrations/tutorial_unitary_designs.py b/demonstrations/tutorial_unitary_designs.py index 07cb0290cb..828a33156e 100644 --- a/demonstrations/tutorial_unitary_designs.py +++ b/demonstrations/tutorial_unitary_designs.py @@ -737,8 +737,4 @@ def null_postprocessing_fn(results): # M. Gaeta, O. Di Matteo, A. B. Klimov, and H. de Guise (2014) *Discrete phase-space # approach to mutually orthogonal Latin squares*. J. Phys. A: Math. Theor. 47 (43) 435303. # `(arXiv) `__. -# -# -# About the author -# ---------------- -# .. 
include:: ../_static/authors/olivia_di_matteo.txt \ No newline at end of file +# \ No newline at end of file diff --git a/demonstrations/tutorial_variational_classifier.metadata.json b/demonstrations/tutorial_variational_classifier.metadata.json index cebd93f3a7..3941e9212a 100644 --- a/demonstrations/tutorial_variational_classifier.metadata.json +++ b/demonstrations/tutorial_variational_classifier.metadata.json @@ -38,5 +38,6 @@ "id": "ensemble_multi_qpu", "weight": 1.0 } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/variational-classifier-demo-question-on-padding/3367" } diff --git a/demonstrations/tutorial_vqe.metadata.json b/demonstrations/tutorial_vqe.metadata.json index b809cec37f..8cc82fc5dd 100644 --- a/demonstrations/tutorial_vqe.metadata.json +++ b/demonstrations/tutorial_vqe.metadata.json @@ -74,5 +74,6 @@ "link": "https://github.com/amazon-braket/amazon-braket-examples/blob/main/examples/pennylane/3_Hydrogen_Molecule_geometry_with_VQE/3_Hydrogen_Molecule_geometry_with_VQE.ipynb", "logo": "/_static/hardware_logos/aws.png" } - ] + ], + "discussionForumUrl": "https://discuss.pennylane.ai/t/a-brief-overview-of-vqe-demo/7333" } diff --git a/poetry.lock b/poetry.lock index c6b12bb177..bdfde6b0c9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1316,13 +1316,6 @@ files = [ {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa42a605d099ee7d41ba2b5fb75e21423951fd26e5d50583a00471238fb3021d"}, {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b7764de0d855338abefc6e3ee9fe40d301668310aa3baea3f778ff051f4393"}, {file = "dm_tree-0.1.8-cp311-cp311-win_amd64.whl", hash = "sha256:a5d819c38c03f0bb5b3b3703c60e4b170355a0fc6b5819325bf3d4ceb3ae7e80"}, - {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea9e59e0451e7d29aece402d9f908f2e2a80922bcde2ebfd5dcb07750fcbfee8"}, - {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:94d3f0826311f45ee19b75f5b48c99466e4218a0489e81c0f0167bda50cacf22"}, - {file = "dm_tree-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:435227cf3c5dc63f4de054cf3d00183790bd9ead4c3623138c74dde7f67f521b"}, - {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09964470f76a5201aff2e8f9b26842976de7889300676f927930f6285e256760"}, - {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75c5d528bb992981c20793b6b453e91560784215dffb8a5440ba999753c14ceb"}, - {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a94aba18a35457a1b5cd716fd7b46c5dafdc4cf7869b4bae665b91c4682a8e"}, - {file = "dm_tree-0.1.8-cp312-cp312-win_amd64.whl", hash = "sha256:96a548a406a6fb15fe58f6a30a57ff2f2aafbf25f05afab00c8f5e5977b6c715"}, {file = "dm_tree-0.1.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8c60a7eadab64c2278861f56bca320b2720f163dca9d7558103c3b77f2416571"}, {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af4b3d372f2477dcd89a6e717e4a575ca35ccc20cc4454a8a4b6f8838a00672d"}, {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de287fabc464b8734be251e46e06aa9aa1001f34198da2b6ce07bd197172b9cb"}, @@ -2183,7 +2176,6 @@ description = "Python AST that abstracts the underlying Python version" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" files = [ - {file = "gast-0.6.0-py3-none-any.whl", 
hash = "sha256:52b182313f7330389f72b069ba00f174cfe2a06411099547288839c6cbafbd54"}, {file = "gast-0.6.0.tar.gz", hash = "sha256:88fc5300d32c7ac6ca7b515310862f71e6fdf2c029bbec7c66c0f5dd47b6b1fb"}, ] @@ -5563,29 +5555,43 @@ tests = ["coverage", "pytest", "pytest-cov"] [[package]] name = "qulacs" -version = "0.1.10.1" +version = "0.6.1" description = "Quantum circuit simulator for research" optional = false python-versions = "*" files = [ - {file = "Qulacs-0.1.10.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:661a4ffa036867c742c4e14539f036179c25970bec499c84c0cea6489297fdfc"}, - {file = "Qulacs-0.1.10.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:34bb1f2de70bd4fbed452e81215ea6fabb228f3c2046f7913b6ae97b9c02c3f5"}, - {file = "Qulacs-0.1.10.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:3c3d3e30c5982954d9449fb5403f7bd448ad8b1f480b6a989afc02855aa8b18b"}, - {file = "Qulacs-0.1.10.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:50b66546f38acf0746a8057f416e1d087205db0edf4127db5d6b0d05541962e9"}, - {file = "Qulacs-0.1.10.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:89bdb2ed6c62c6f3c02cea1f6296877967631b8d3511be1f85287b674e178226"}, - {file = "Qulacs-0.1.10.1-cp35-cp35m-win_amd64.whl", hash = "sha256:96c948587a83a6f2c42686f7f684b61d028eb70be64d271e807ddad94e083336"}, - {file = "Qulacs-0.1.10.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6120f10a5553d713f18d756eae2d76c7c2334bf2c9e1405589d6992a792e6c18"}, - {file = "Qulacs-0.1.10.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:40ea157a9bf1975e79c67d4e314f89a138ff60a751ede1e82118ec68ec1eccbc"}, - {file = "Qulacs-0.1.10.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5e1d8dc0c01c455a5a84d163a7ecfc8476e219ea5a31a1df387231fcce870ff1"}, - {file = "Qulacs-0.1.10.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5db539ac42b8b33d3b42cc5f759e82d1fff2e9f6d61a365da611083b048fd487"}, - {file = "Qulacs-0.1.10.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:68510e18f9c9024408a9a7c0adfc79f8d8eabe79610719f2247989010149fe42"}, - {file = "Qulacs-0.1.10.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ec687a6e1dc253e26939b068322d6f717f307108f7f5037401d0d98509781362"}, - {file = "Qulacs-0.1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a17363d1992d299d5d71b14d47d603316c57404bb9699a1893d3fc3a3b0b058"}, - {file = "Qulacs-0.1.10.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:621e84f7efbee9b76ee047066cb94cf328ec94fbcab789ac47ebed1fa5a130c5"}, - {file = "Qulacs-0.1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:cf8cd499d9c2fa99ef79e6f800e24cba46d2a1e56a3ce30df288a29deb6458a9"}, - {file = "Qulacs-0.1.10.1.tar.gz", hash = "sha256:d4313b1792b55bbbfea88ce6291185ed48f9da712418aa9f9931c1c8a8226e6c"}, + {file = "qulacs-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f1f1951e9cb55e11ded47b7a652bb03b06f986ba19e6c2ea76b40b159f297f62"}, + {file = "qulacs-0.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b922972a7b9a827c16dc9e2c14478f276ead2eacad7d545db03ab795854b94f"}, + {file = "qulacs-0.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6892d39d46e1b5bd89def6cb96e67316abcb04ba1e1a5d894a2d7a155c0eb27"}, + {file = "qulacs-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:df0f0d0fe6f53ea628960ab32239ba0dd92a5e4b88a2936a24442fd64e58e515"}, + {file = "qulacs-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fea830bebadd34f07fb35680c11e7cd712343ea8dbead0d5ef417c70605a4ea4"}, + {file = "qulacs-0.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:4807a134e00a9d6f20c488036933ad7c570ba07bcb813c8afd2b06f60c8c98a1"}, + {file = "qulacs-0.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ea1c78fda2830a52702cffb7f22b3900282329ac5dcfa7c947dd00b0b126ba5"}, + {file = "qulacs-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:dc18d30a7e809786df0007a7bd7a918f53cc60369ae19a671cb55b65ea5056e5"}, + {file = "qulacs-0.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3f29acc7b86913612ba2b39241b365e0dea9925a4929998dfc2eb94e7d43b81e"}, + {file = "qulacs-0.6.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69f9ce7c836e0b755a158a4cc392b49ea414c6eb37b109ec3149d7b1f13005dc"}, + {file = "qulacs-0.6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:536212f0742efa49a6fc91c4c3349b48e669d47f7881a90f88c491d19ef7e732"}, + {file = "qulacs-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:17142f904cb62f688f76feee07742072bfb9817992dbe5fa73da667e04425c55"}, + {file = "qulacs-0.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c50733917f1b01b11e6be177255fab1be7af5563984f9c06827d038b4af4585"}, + {file = "qulacs-0.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bd699a68034a2a29e153d30735b561f57f0796bb4d30b2576d1cce1671e8d0b"}, + {file = "qulacs-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:7e251102e157f36ade2b16ff34d65d55515f88ca0bf5be0a93f50232fd08eb97"}, + {file = "qulacs-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b553c82db86e9e972b7cfdb816276123cc15aa4877375e8be14c48ec381109aa"}, + {file = "qulacs-0.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:60ed40efbeed8141ca171fceefbfa581d93a961601e4053b731262900724e383"}, + {file = "qulacs-0.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06aaf7a7a01e6179a0362e170c923ac5834b8d0c5433a677dbcfed9aa9fdff02"}, + {file = "qulacs-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:331a8084ed08b0e356149e51a756afd6c628fceb070d64c500d2c657351de909"}, + {file = "qulacs-0.6.1.tar.gz", hash = "sha256:f958f26de98a37c519ff092167d440931a3ee77f8f628e2ab7441fe15322c28a"}, ] +[package.dependencies] +numpy = "*" +scipy = "*" + +[package.extras] +ci = ["black", "flake8", "isort", "mypy", "openfermion", "pybind11-stubgen", "pytest"] +dev = ["black", "flake8", "isort", "mypy", "openfermion", "pybind11-stubgen", "pytest"] +doc = ["breathe (==4.33.*)", "exhale (==0.3.*)", "ipykernel (==6.17.*)", "myst-parser (==0.18.*)", "nbsphinx (==0.8.*)", "sphinx (==4.5.0)", "sphinx-autoapi (==2.0.*)", "sphinx-copybutton (==0.5.*)", "sphinx-rtd-theme (==1.0.*)"] +test = ["openfermion"] + [[package]] name = "qutip" version = "4.7.3" @@ -8003,4 +8009,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "~3.10.0" -content-hash = "5a2bc15c89199f5dcf3052e6859936688774e643db3da23edec36ef9d1aed534" +content-hash = "2e6d9c85baec80b413760cf4fb5e42edd02a04c324c7d0bf100ec3f61ee95df2" diff --git a/pyproject.toml b/pyproject.toml index 008a413867..f396be3bc4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -93,7 +93,7 @@ torchvision = [ scikit-learn = { version = "1.3.0", markers = "platform_machine == 'x86_64'" } tensorflow = { version = "2.14.1", markers = "platform_machine == 'x86_64'" } flamingpy = { version = ">=0.10.1b1", markers = "platform_machine == 'x86_64'" } -qulacs = { version = "0.1.10.1", markers = "platform_machine == 'x86_64'" } +qulacs = { version = "0.6.1", markers = "platform_machine == 'x86_64'" } # The following packages are only installed on MacOS for compatibility tensorflow-macos = { 
version = "2.14.1", markers = "sys_platform == 'darwin' and platform_machine == 'arm64'" }